├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ ├── aggregate.py │ ├── check.yml │ ├── cleanup.yml │ ├── deploy.yml │ └── interop.yml ├── .gitignore ├── .travis.yml ├── LICENSE.md ├── README.md ├── cert_config.txt ├── certs.sh ├── docker-compose.yml ├── empty.env ├── implementations.json ├── implementations.py ├── interop.py ├── pull.py ├── requirements.txt ├── result.py ├── run.py ├── setup.cfg ├── testcases.py ├── trace.py └── web ├── Caddyfile ├── cleanup.sh ├── index.html ├── script.js └── styles.css /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [marten-seemann] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "pip" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /.github/workflows/aggregate.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import sys 4 | 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument( 9 | "-s", "--server", help="server implementations (comma-separated)" 10 | ) 11 | parser.add_argument( 12 | "-c", "--client", help="client implementations (comma-separated)" 13 | ) 14 | parser.add_argument("-t", "--start-time", help="start time") 15 | parser.add_argument("-l", "--log-dir", help="log directory") 16 | parser.add_argument("-o", "--output", help="output file (stdout if not set)") 17 | return parser.parse_args() 18 | 19 | 20 | servers = get_args().server.split(",") 21 | clients = get_args().client.split(",") 22 | result = { 23 | "servers": servers, 24 | "clients": clients, 25 | "log_dir": get_args().log_dir, 26 | "start_time": int(get_args().start_time), 27 | "results": [], 28 | "measurements": [], 29 | "tests": {}, 30 | "urls": {}, 31 | } 32 | 33 | 34 | def parse(server: str, client: str, cat: str): 35 | filename = server + "_" + client + "_" + cat + ".json" 36 | try: 37 | with open(filename) as f: 38 | data = json.load(f) 39 | except IOError: 40 | print("Warning: Couldn't open file " + filename) 41 | result[cat].append([]) 42 | return 43 | parse_data(server, client, cat, data) 44 | 45 | 46 | def parse_data(server: str, client: str, cat: str, data: object): 47 | if len(data["servers"]) != 1: 48 | sys.exit("expected exactly one server") 49 | if data["servers"][0] != server: 50 | sys.exit("inconsistent server") 51 | if len(data["clients"]) != 1: 52 | sys.exit("expected exactly one client") 53 | if data["clients"][0] != client: 54 | sys.exit("inconsistent client") 55 | if "end_time" not in result or data["end_time"] > result["end_time"]: 56 | result["end_time"] = data["end_time"] 57 | result[cat].append(data[cat][0]) 58 | result["quic_draft"] = data["quic_draft"] 59 | result["quic_version"] = data["quic_version"] 60 | result["urls"].update(data["urls"]) 61 | result["tests"].update(data["tests"]) 62 | 63 | 64 | for client in clients: 65 | for server in servers: 66 | parse(server, client, "results") 67 | parse(server, client, "measurements") 68 | 69 | if get_args().output: 70 | f = open(get_args().output, "w") 71 | json.dump(result, f) 72 | f.close() 73 | else: 74 | print(json.dumps(result)) 75 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | jobs: 4 | check: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - uses: actions/setup-python@v5 9 | - name: install tools 10 | run: pip install flake8 black 11 | - name: enforce coding styles using flake8 12 | run: flake8 . 13 | - name: run Black linter 14 | if: success() || failure() # run this step even if the previous one failed 15 | run: black --check --diff . 
16 | - name: check that implementations.json is valid 17 | if: success() || failure() # run this step even if the previous one failed 18 | run: python implementations.py 19 | - uses: ludeeus/action-shellcheck@master 20 | -------------------------------------------------------------------------------- /.github/workflows/cleanup.yml: -------------------------------------------------------------------------------- 1 | name: cleanup 2 | on: 3 | schedule: 4 | - cron: "0 */8 * * *" # every 8h 5 | 6 | jobs: 7 | cleanup: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Delete old logs on the server 11 | uses: appleboy/ssh-action@2ead5e36573f08b82fbfce1504f1a4b05a647c6f # v1.2.2 12 | with: 13 | host: interop.seemann.io 14 | username: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 15 | key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 16 | script: | 17 | delete_oldest_folder() { 18 | OLDEST_DIR=$(find "${{ vars.LOG_DIR }}" -mindepth 1 -maxdepth 1 -type d -printf '%T+ %p\n' | sort | head -n 1 | cut -d" " -f2-) 19 | if [[ -n "$OLDEST_DIR" ]]; then 20 | echo "Deleting oldest directory: $OLDEST_DIR" 21 | rm -rf "$OLDEST_DIR" 22 | fi 23 | } 24 | 25 | # Loop until enough space is available or no directories left to delete 26 | while true; do 27 | AVAILABLE_SPACE_GB=$(df -BG "${{ vars.LOG_DIR }}" | tail -n 1 | awk '{print $4}' | sed 's/G//') 28 | echo "Available Space: $AVAILABLE_SPACE_GB GB" 29 | 30 | if [[ "$AVAILABLE_SPACE_GB" -lt 50 ]]; then 31 | echo "Less than 50 GB available. Trying to clean up..." 32 | delete_oldest_folder 33 | else 34 | echo "Enough space available." 35 | break 36 | fi 37 | done 38 | 39 | TEMP_FILE=$(mktemp) 40 | find "${{ vars.LOG_DIR }}" -mindepth 1 -maxdepth 1 -type d -not -name 'lost+found' -exec basename {} \; | sort > "$TEMP_FILE" 41 | jq -R -s 'split("\n") | map(select(. != ""))' "$TEMP_FILE" > "${{ vars.LOG_DIR }}/logs.json" 42 | rm -f "$TEMP_FILE" 43 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy website 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Upload website to interop.seemann.io 14 | uses: burnett01/rsync-deployments@796cf0d5e4b535745ce49d7429f77cf39e25ef39 # v7.0.1 15 | with: 16 | switches: -avzr --delete 17 | path: web/ 18 | remote_path: ${{ secrets.INTEROP_SEEMANN_IO_WEBSITE_DIR }} 19 | remote_host: interop.seemann.io 20 | remote_user: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 21 | remote_key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 22 | - name: Restart server 23 | uses: appleboy/ssh-action@2ead5e36573f08b82fbfce1504f1a4b05a647c6f # v1.2.2 24 | with: 25 | host: interop.seemann.io 26 | username: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 27 | key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 28 | script: service website restart 29 | -------------------------------------------------------------------------------- /.github/workflows/interop.yml: -------------------------------------------------------------------------------- 1 | name: interop 2 | on: 3 | schedule: 4 | # Every 8h, at 15 minutes past the hour 5 | # This makes sure that the cleanup cron job can run first. 
6 | - cron: "15 */8 * * *" 7 | 8 | jobs: 9 | config: 10 | runs-on: ubuntu-latest 11 | outputs: 12 | logname: ${{ steps.set-logname.outputs.logname }} 13 | starttime: ${{ steps.set-starttime.outputs.starttime }} 14 | servers: ${{ steps.set-servers.outputs.servers }} 15 | clients: ${{ steps.set-clients.outputs.clients }} 16 | images: ${{ steps.set-images.outputs.images }} 17 | steps: 18 | - name: Set log name 19 | id: set-logname 20 | run: | 21 | LOGNAME=$(date -u +"%Y-%m-%dT%H:%M") 22 | echo $LOGNAME 23 | echo "logname=$LOGNAME" >> $GITHUB_OUTPUT 24 | - name: Save start time 25 | id: set-starttime 26 | run: | 27 | STARTTIME=$(date +%s) 28 | echo $STARTTIME 29 | echo "starttime=$STARTTIME" >> $GITHUB_OUTPUT 30 | - uses: actions/checkout@v4 31 | - uses: actions/setup-python@v5 32 | with: 33 | python-version: 3.8 34 | - name: Determine servers 35 | id: set-servers 36 | run: | 37 | SERVERS=$(jq -c 'with_entries(select(.value.role == "server" or .value.role == "both")) | keys_unsorted' implementations.json) 38 | echo $SERVERS 39 | echo "servers=$SERVERS" >> $GITHUB_OUTPUT 40 | - name: Determine clients 41 | id: set-clients 42 | run: | 43 | CLIENTS=$(jq -c 'with_entries(select(.value.role == "client" or .value.role == "both")) | keys_unsorted' implementations.json) 44 | echo $CLIENTS 45 | echo "clients=$CLIENTS" >> $GITHUB_OUTPUT 46 | - name: Determine Docker images 47 | id: set-images 48 | run: | 49 | IMAGES=$(jq -c 'keys_unsorted' implementations.json) 50 | echo $IMAGES 51 | echo "images=$IMAGES" >> $GITHUB_OUTPUT 52 | docker-pull-tools: 53 | runs-on: ubuntu-latest 54 | strategy: 55 | matrix: 56 | image: [ 'quic-network-simulator', 'quic-interop-iperf-endpoint' ] 57 | steps: 58 | - uses: actions/checkout@v4 59 | - name: Pull 60 | run: | 61 | URL="martenseemann/${{ matrix.image }}" 62 | docker pull $URL 63 | echo "URL=$URL" >> $GITHUB_ENV 64 | - name: Docker inspect 65 | run: docker image inspect $URL 66 | - name: Save Docker image 67 | run: | 68 | docker save $URL | gzip --best > ${{ matrix.image }}.tar.gz 69 | du -sh ${{ matrix.image }}.tar.gz 70 | - name: Upload result 71 | uses: actions/upload-artifact@v4 72 | with: 73 | name: images-${{ matrix.image }} 74 | path: ${{ matrix.image }}.tar.gz 75 | if-no-files-found: error 76 | docker-pull-images: 77 | needs: [ config ] 78 | runs-on: ubuntu-latest 79 | strategy: 80 | matrix: 81 | image: ${{ fromJson(needs.config.outputs.images) }} 82 | name: Pull ${{ matrix.image }} 83 | steps: 84 | - uses: actions/checkout@v4 85 | - name: Run docker pull 86 | run: | 87 | URL=$(jq -r '.["${{ matrix.image }}"].image' implementations.json) 88 | echo $URL 89 | docker pull $URL 90 | echo "URL=$URL" >> $GITHUB_ENV 91 | - name: Docker inspect 92 | run: docker image inspect $URL 93 | - name: Save Docker image 94 | run: | 95 | docker save $URL | gzip --best > ${{ matrix.image }}.tar.gz 96 | du -sh ${{ matrix.image }}.tar.gz 97 | - name: Upload result 98 | uses: actions/upload-artifact@v4 99 | with: 100 | name: image-${{ matrix.image }} 101 | path: ${{ matrix.image }}.tar.gz 102 | if-no-files-found: error 103 | tests: 104 | needs: [ config, docker-pull-tools, docker-pull-images ] 105 | runs-on: ubuntu-latest 106 | continue-on-error: true 107 | timeout-minutes: 45 108 | strategy: 109 | fail-fast: false 110 | matrix: 111 | server: ${{ fromJson(needs.config.outputs.servers) }} 112 | client: ${{ fromJson(needs.config.outputs.clients) }} 113 | name: (${{ matrix.server }} - ${{ matrix.client }}) 114 | steps: 115 | - uses: actions/checkout@v4 116 | - uses: 
actions/setup-python@v5 117 | with: 118 | python-version: 3.8 119 | - name: Enable IPv6 support 120 | run: sudo modprobe ip6table_filter 121 | - run: docker image ls 122 | - name: Download quic-network-simulator image 123 | uses: actions/download-artifact@v4 124 | with: 125 | name: images-quic-network-simulator 126 | - name: Download quic-interop-iperf-endpoint image 127 | uses: actions/download-artifact@v4 128 | with: 129 | name: images-quic-interop-iperf-endpoint 130 | - name: Download ${{ matrix.server }} Docker image 131 | uses: actions/download-artifact@v4 132 | with: 133 | name: image-${{ matrix.server }} 134 | - name: Download ${{ matrix.client }} Docker image 135 | if: ${{ matrix.server != matrix.client }} 136 | uses: actions/download-artifact@v4 137 | with: 138 | name: image-${{ matrix.client }} 139 | - name: Load docker images 140 | run: | 141 | docker load --input quic-network-simulator.tar.gz 142 | docker load --input quic-interop-iperf-endpoint.tar.gz 143 | docker load --input ${{ matrix.server }}.tar.gz 144 | docker load --input ${{ matrix.client }}.tar.gz 145 | - run: docker image ls 146 | - name: Install Wireshark 147 | run: | 148 | sudo add-apt-repository ppa:wireshark-dev/nightly 149 | sudo apt-get update 150 | sudo apt-get install -y --no-install-recommends tshark 151 | - name: Install Python packages 152 | run: | 153 | pip install -U pip 154 | pip install -r requirements.txt 155 | - name: Run tests 156 | env: 157 | CRON: "true" 158 | run: | 159 | (python run.py --client ${{ matrix.client }} --server ${{ matrix.server }} --log-dir logs --json ${{ matrix.server }}_${{ matrix.client }}_results.json -t onlyTests || true) | tee output.txt 160 | mkdir -p logs/${{ matrix.server }}_${{ matrix.client }} 161 | mv output.txt logs/${{ matrix.server }}_${{ matrix.client }}/ 162 | - name: Run measurements 163 | env: 164 | CRON: "true" 165 | run: | 166 | python run.py --client ${{ matrix.client }} --server ${{ matrix.server }} --log-dir logs_measurement --json ${{ matrix.server }}_${{ matrix.client }}_measurements.json -t onlyMeasurements || true 167 | if [ ! 
-d "logs_measurement" ]; then exit 0; fi 168 | find logs_measurement -depth -name "sim" -type d -exec rm -r "{}" \; 169 | find logs_measurement -depth -name "client" -type d -exec rm -r "{}" \; 170 | find logs_measurement -depth -name "server" -type d -exec rm -r "{}" \; 171 | mv logs_measurement/${{ matrix.server }}_${{ matrix.client }}/* logs/${{ matrix.server }}_${{ matrix.client }}/ 172 | - name: Upload logs to interop.seemann.io 173 | uses: burnett01/rsync-deployments@796cf0d5e4b535745ce49d7429f77cf39e25ef39 # v7.0.1 174 | if: ${{ github.event_name == 'schedule' }} 175 | with: 176 | switches: -avzr --relative 177 | path: logs/./${{ matrix.server }}_${{ matrix.client }}/ 178 | remote_path: ${{ vars.LOG_DIR }}/${{ needs.config.outputs.logname }} 179 | remote_host: interop.seemann.io 180 | remote_user: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 181 | remote_key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 182 | - name: Upload result 183 | uses: actions/upload-artifact@v4 184 | with: 185 | name: results-${{ matrix.server }}-${{ matrix.client }} 186 | if-no-files-found: error 187 | path: | 188 | ${{ matrix.server }}_${{ matrix.client }}_results.json 189 | ${{ matrix.server }}_${{ matrix.client }}_measurements.json 190 | aggregate: 191 | needs: [ config, tests ] 192 | runs-on: ubuntu-latest 193 | if: always() 194 | env: 195 | LOGNAME: ${{ needs.config.outputs.logname }} 196 | steps: 197 | - uses: actions/checkout@v4 198 | - uses: actions/setup-python@v5 199 | with: 200 | python-version: 3.8 201 | - name: Download results 202 | uses: actions/download-artifact@v4 203 | with: 204 | pattern: results-* 205 | - name: Aggregate results 206 | run: | 207 | mv results-*/*.json . 208 | python .github/workflows/aggregate.py \ 209 | --start-time ${{ needs.config.outputs.starttime }} \ 210 | --server ${{ join(fromJson(needs.config.outputs.servers), ',') }} \ 211 | --client ${{ join(fromJson(needs.config.outputs.clients), ',') }} \ 212 | --log-dir=$LOGNAME \ 213 | --output result.json 214 | - name: Print result 215 | run: jq '.' result.json 216 | - name: Upload result to artifacts 217 | uses: actions/upload-artifact@v4 218 | with: 219 | name: result-aggregated 220 | path: result.json 221 | - name: Upload logs to interop.seemann.io 222 | uses: burnett01/rsync-deployments@796cf0d5e4b535745ce49d7429f77cf39e25ef39 # v7.0.1 223 | if: ${{ github.event_name == 'schedule' }} 224 | with: 225 | switches: -avzr 226 | path: result.json 227 | remote_path: ${{ vars.LOG_DIR }}/${{ needs.config.outputs.logname }}/ 228 | remote_host: interop.seemann.io 229 | remote_user: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 230 | remote_key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 231 | - name: Point interop.seemann.io to the latest result 232 | uses: appleboy/ssh-action@2ead5e36573f08b82fbfce1504f1a4b05a647c6f # v1.2.2 233 | if: ${{ github.event_name == 'schedule' }} 234 | with: 235 | host: interop.seemann.io 236 | username: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 237 | key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 238 | envs: LOGNAME 239 | script: | 240 | cd ${{ vars.LOG_DIR }} 241 | jq '. 
+= [ "${{ needs.config.outputs.logname }}" ]' logs.json | sponge logs.json 242 | rm latest || true 243 | ln -s $LOGNAME latest 244 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | certs/ 2 | logs/ 3 | logs_*/ 4 | *.json 5 | !implementations.json 6 | web/latest 7 | 8 | *.egg-info/ 9 | __pycache__ 10 | build/ 11 | dist/ 12 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | install: pip install -r requirements.txt black flake8 isort 3 | language: python 4 | python: "3.8" 5 | script: .travis/script 6 | sudo: true 7 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2019 Jana Iyengar, Marten Seemann 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Interop Test Runner 2 | 3 | The Interop Test Runner aims to automatically generate an interop matrix by running multiple **test cases** using different QUIC implementations. 4 | 5 | * Research Article: [Automating QUIC Interoperability Testing](https://dl.acm.org/doi/10.1145/3405796.3405826) 6 | * IETF Blog Post: [Automating interoperability testing to improve open standards for the Internet](https://www.ietf.org/blog/quic-automated-interop-testing/) 7 | 8 | ## Requirements 9 | 10 | The Interop Runner is written in Python 3. You'll need to install the 11 | following softwares to run the interop test: 12 | 13 | * Python3 modules. Run the following command: 14 | 15 | ```bash 16 | pip3 install -r requirements.txt 17 | ``` 18 | 19 | * [Docker](https://docs.docker.com/engine/install/) and [docker compose](https://docs.docker.com/compose/). 20 | 21 | * [Development version of Wireshark](https://www.wireshark.org/download.html) (version 4.5.0 or newer). 22 | 23 | ## Running the Interop Runner 24 | 25 | Run the interop tests: 26 | 27 | ```bash 28 | python3 run.py 29 | ``` 30 | 31 | ## IPv6 support 32 | 33 | To enable IPv6 support for the simulator on Linux, the `ip6table_filter` kernel module needs to be loaded on the host. If it isn't loaded on your machine, you'll need to run `sudo modprobe ip6table_filter`. 34 | 35 | ## Building a QUIC endpoint 36 | 37 | To include your QUIC implementation in the Interop Runner, create a Docker image following the instructions for [setting up an endpoint in the quic-network-simulator](https://github.com/quic-interop/quic-network-simulator), publish it on [Docker Hub](https://hub.docker.com) and add it to [implementations.json](implementations.json). 
Once your implementation is ready to interop, please send us a PR with this addition. Read on for more instructions on what to do within the Docker image. 38 | 39 | Typically, a test case will require a server to serve files from a directory, and a client to download files. Different test cases will specify the behavior to be tested. For example, the Retry test case expects the server to use a Retry before accepting the connection from the client. All configuration information from the test framework to your implementation is fed into the Docker image using environment variables. The test case is passed into your Docker container using the `TESTCASE` environment variable. If your implementation doesn't support a test case, it MUST exit with status code 127. This will allow us to add new test cases in the future, and correctly report test failures and successes, even if some implementations have not yet implemented support for this new test case. 40 | 41 | The Interop Runner mounts the directory `/www` into your server Docker container. This directory will contain one or more randomly generated files. Your server implementation is expected to run on port 443 and serve files from this directory. 42 | Similarly, the Interop Runner mounts `/downloads` into your client Docker container. The directory is initially empty, and your client implementation is expected to store downloaded files into this directory. The URLs of the files to download are passed to the client using the environment variable `REQUESTS`, which contains one or more URLs, separated by a space. 43 | 44 | After the transfer is completed, the client container is expected to exit with exit status 0. If an error occurred during the transfer, the client is expected to exit with exit status 1. 45 | After completion of the test case, the Interop Runner will verify that the client downloaded the files it was expected to transfer, and that the file contents match. Additionally, for certain test cases, the Interop Runner will use the pcap of the transfer to verify that the implementations fulfilled the requirements of the test (for example, for the Retry test case, the pcap should show that a Retry packet was sent, and that the client used the Token provided in that packet). 46 | 47 | The Interop Runner generates a key and a certificate chain and mounts them into `/certs`. The server needs to load its private key from `priv.key`, and the certificate chain from `cert.pem`.
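To make this contract concrete, here is a minimal sketch of an endpoint entrypoint script. It is not taken from any particular implementation: the `my-quic-server` and `my-quic-client` binaries and their flags are placeholders, and a real script would dispatch on every `TESTCASE` value the implementation supports.

```bash
#!/usr/bin/env bash
set -e

# Test cases this implementation does not support MUST be rejected
# with exit status 127.
case "$TESTCASE" in
    handshake|transfer|retry|resumption|http3) ;;
    *) exit 127 ;;
esac

if [ "$ROLE" = "server" ]; then
    # Serve the randomly generated files in /www on port 443, using the
    # private key and certificate chain mounted into /certs.
    my-quic-server --port 443 --root /www \
        --key /certs/priv.key --cert /certs/cert.pem
else
    # Download every URL listed in $REQUESTS (space-separated) into
    # /downloads; exit with 0 on success and 1 on any transfer error.
    for req in $REQUESTS; do
        my-quic-client --download-dir /downloads "$req" || exit 1
    done
fi
```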
48 | 49 | ### Examples 50 | 51 | If you're not familiar with Docker, it might be helpful to have a look at the Dockerfiles and scripts that other implementations use: 52 | 53 | * quic-go: [Dockerfile](https://github.com/lucas-clemente/quic-go/blob/master/interop/Dockerfile), [run_endpoint.sh](https://github.com/lucas-clemente/quic-go/blob/master/interop/run_endpoint.sh) and [CI config](https://github.com/lucas-clemente/quic-go/blob/master/.github/workflows/build-interop-docker.yml) 54 | * quicly: [Dockerfile](https://github.com/h2o/quicly/blob/master/misc/quic-interop-runner/Dockerfile) and [run_endpoint.sh](https://github.com/h2o/quicly/blob/master/misc/quic-interop-runner/run_endpoint.sh) 55 | * quant: [Dockerfile](https://github.com/NTAP/quant/blob/master/Dockerfile.interop) and [run_endpoint.sh](https://github.com/NTAP/quant/blob/master/test/interop.sh), built on [DockerHub](https://hub.docker.com/r/ntap/quant) 56 | * quiche: [Dockerfile](https://github.com/cloudflare/quiche/blob/master/Dockerfile) and [run_endpoint.sh](https://github.com/cloudflare/quiche/blob/master/tools/qns/run_endpoint.sh) 57 | * neqo: [Dockerfile](https://github.com/mozilla/neqo/blob/main/qns/Dockerfile) and [interop.sh](https://github.com/mozilla/neqo/blob/main/qns/interop.sh) 58 | * msquic: [Dockerfile](https://github.com/microsoft/msquic/blob/master/Dockerfile), [run_endpoint.sh](https://github.com/microsoft/msquic/blob/master/scripts/run_endpoint.sh) and [CI config](https://github.com/microsoft/msquic/blob/master/.azure/azure-pipelines.docker.yml) 59 | 60 | Implementers: Please feel free to add links to your implementation here! 61 | 62 | Note that the [online interop](https://interop.seemann.io/) runner requires the `linux/amd64` architecture, so if you build on a different architecture (e.g. "Apple silicon"), you would need to use `--platform linux/amd64` with `docker build` to create a compatible image. 63 | Even better, and the recommended approach, is to use a multi-platform build to provide both `amd64` and `arm64` images, so everybody can run the interop locally with your implementation. To build the multi-platform image, you can use the `docker buildx` command: 64 | 65 | ```bash 66 | docker buildx create --use 67 | docker buildx build --pull --push --platform linux/amd64,linux/arm64 -t <your-image-tag> . 68 | ``` 69 | 70 | ## Logs 71 | 72 | To facilitate debugging, the Interop Runner saves the log files to the logs directory. This directory is overwritten every time the Interop Runner is executed. 73 | 74 | The log files are saved to a directory named `#server_#client/#testcase`. `output.txt` contains the console output of the interop test runner (which might contain information about why a test case failed). The server and client logs are saved in the `server` and `client` directory, respectively. The `sim` directory contains pcaps recorded by the simulator. 75 | 76 | If implementations wish to export the TLS secrets, they are encouraged to do so in the [NSS Key Log format](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format). The interop runner sets the SSLKEYLOGFILE environment variable to a file in the logs directory. In the future, the interop runner might use those files to decode the traces. 77 | 78 | Implementations that implement [qlog](https://github.com/quiclog/internet-drafts) should export the log files to the directory specified by the `QLOGDIR` environment variable. 79 | 80 | ## Test cases 81 | 82 | The Interop Runner implements the following test cases.
Unless noted otherwise, test cases use HTTP/0.9 for file transfers. More test cases will be added in the future, to test more protocol features. The name in parentheses is the value of the `TESTCASE` environment variable passed into your Docker container. 83 | 84 | * **Version Negotiation** (`versionnegotiation`): Tests that a server sends a Version Negotiation packet in response to an unknown QUIC version number. The client should start a connection using an unsupported version number (it can use a reserved version number to do so), and should abort the connection attempt when receiving the Version Negotiation packet. 85 | Currently disabled due to #20. 86 | 87 | * **Handshake** (`handshake`): Tests the successful completion of the handshake. The client is expected to establish a single QUIC connection to the server and download one or multiple small files. Servers should not send a Retry packet in this test case. 88 | 89 | * **Transfer** (`transfer`): Tests both flow control and stream multiplexing. The client should use small initial flow control windows for both stream- and connection-level flow control, such that, during the transfer of files on the order of 1 MB, the flow control window needs to be increased. The client is expected to establish a single QUIC connection, and use multiple streams to concurrently download the files. 90 | 91 | * **ChaCha20** (`chacha20`): In this test, client and server are expected to offer **only** ChaCha20 as a ciphersuite. The client then downloads the files. 92 | 93 | * **KeyUpdate** (`keyupdate`, only for the client): The client is expected to make sure that a key update happens early in the connection (during the first MB transferred). It doesn't matter which peer actually initiated the update. 94 | 95 | * **Retry** (`retry`): Tests that the server can generate a Retry, and that the client can act upon it (i.e. use the Token provided in the Retry packet in the Initial packet). 96 | 97 | * **Resumption** (`resumption`): Tests QUIC session resumption (without 0-RTT). The client is expected to establish a connection and download the first file. The server is expected to provide the client with a session ticket that allows it to resume the connection. After downloading the first file, the client has to close the connection, establish a resumed connection using the session ticket, and use this connection to download the remaining file(s). 98 | 99 | * **0-RTT** (`zerortt`): Tests QUIC 0-RTT. The client is expected to establish a connection and download the first file. The server is expected to provide the client with a session ticket that allows it to establish a 0-RTT connection on the next connection attempt. After downloading the first file, the client has to close the connection, then establish a 0-RTT connection and request the remaining file(s) over it. 100 | 101 | * **HTTP3** (`http3`): Tests a simple HTTP/3 connection. The client is expected to download multiple files using HTTP/3. Files should be requested and transferred in parallel. 102 | 103 | * **Handshake Loss** (`multiconnect`): Tests resilience of the handshake to high loss. The client is expected to establish multiple connections, sequentially or in parallel, and use each connection to download a single file. 104 | 105 | * **V2** (`v2`): In this test, the client starts a connection with the server using QUIC v1, including a `version_information` transport parameter that lists QUIC v2 (`0x6b3343cf`) in its `other_versions` field. The server should select QUIC v2 during compatible version negotiation.
The client is expected to download one small file using QUIC v2. 106 | 107 | * **Port Rebinding** (`rebind-port`): In this test case, a NAT is simulated that changes the client port (as observed by the server) after the handshake. The server should perform path validation. 108 | 109 | * **Address Rebinding** (`rebind-addr`): In this test case, a NAT is simulated that changes the client IP address (as observed by the server) after the handshake. The server should perform path validation. 110 | 111 | * **Connection Migration** (`connectionmigration`): In this test case, the server is expected to provide its preferred addresses to the client during the handshake. The client is expected to perform active migration to one of those addresses. 112 | -------------------------------------------------------------------------------- /cert_config.txt: -------------------------------------------------------------------------------- 1 | [ req ] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_ca 4 | dirstring_type = nobmp 5 | [ req_distinguished_name ] 6 | [ v3_ca ] 7 | keyUsage=critical, keyCertSign 8 | subjectKeyIdentifier=hash 9 | authorityKeyIdentifier=keyid:always,issuer:always 10 | basicConstraints=critical,CA:TRUE,pathlen:100 11 | -------------------------------------------------------------------------------- /certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | if [ -z "$1" ] || [ -z "$2" ]; then 6 | echo "$0 <certdir> <chainlen>" 7 | exit 1 8 | fi 9 | 10 | CERTDIR=$1 11 | CHAINLEN=$2 12 | 13 | mkdir -p "$CERTDIR" || true 14 | 15 | # Generate Root CA and certificate 16 | openssl ecparam -name prime256v1 -genkey -out "$CERTDIR"/ca_0.key 17 | openssl req -x509 -sha256 -nodes -days 10 -key "$CERTDIR"/ca_0.key \ 18 | -out "$CERTDIR"/cert_0.pem \ 19 | -subj "/O=interop runner Root Certificate Authority/" \ 20 | -config cert_config.txt \ 21 | -extensions v3_ca \ 22 | 2>/dev/null 23 | 24 | # Inflate certificate for the amplification test 25 | fakedns="" 26 | if [ "$CHAINLEN" != "1" ]; then 27 | for i in $(seq 1 20); do 28 | fakedns="$fakedns,DNS:$(LC_CTYPE=C tr -dc '[:alnum:]' /dev/null 43 | 44 | # Sign the certificate 45 | j=$((i - 1)) 46 | if [[ $i -lt "$CHAINLEN" ]]; then 47 | openssl x509 -req -sha256 -days 10 -in "$CERTDIR"/cert.csr -out "$CERTDIR"/cert_"$i".pem \ 48 | -CA "$CERTDIR"/cert_"$j".pem -CAkey "$CERTDIR"/ca_"$j".key -CAcreateserial \ 49 | -extfile cert_config.txt \ 50 | -extensions v3_ca \ 51 | 2>/dev/null 52 | else 53 | openssl x509 -req -sha256 -days 10 -in "$CERTDIR"/cert.csr -out "$CERTDIR"/cert_"$i".pem \ 54 | -CA "$CERTDIR"/cert_"$j".pem -CAkey "$CERTDIR"/ca_"$j".key -CAcreateserial \ 55 | -extfile <(printf "subjectAltName=DNS:server,DNS:server4,DNS:server6,DNS:server46%s" "$fakedns") \ 56 | 2>/dev/null 57 | fi 58 | done 59 | 60 | mv "$CERTDIR"/cert_0.pem "$CERTDIR"/ca.pem 61 | cp "$CERTDIR"/ca_"$CHAINLEN".key "$CERTDIR"/priv.key 62 | 63 | # combine certificates 64 | for i in $(seq "$CHAINLEN" -1 1); do 65 | cat "$CERTDIR"/cert_"$i".pem >>"$CERTDIR"/cert.pem 66 | rm "$CERTDIR"/cert_"$i".pem "$CERTDIR"/ca_"$i".key 67 | done 68 | rm -f "$CERTDIR"/*.srl "$CERTDIR"/ca_0.key "$CERTDIR"/cert.csr 69 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | sim: 3 | image: martenseemann/quic-network-simulator 4 | container_name: sim 5 | hostname: sim 6 | stdin_open: true 7
| tty: true 8 | environment: 9 | - WAITFORSERVER=$WAITFORSERVER 10 | - SCENARIO=$SCENARIO 11 | cap_add: 12 | - NET_ADMIN 13 | - NET_RAW 14 | expose: 15 | - "57832" 16 | networks: 17 | leftnet: 18 | ipv4_address: 193.167.0.2 19 | ipv6_address: fd00:cafe:cafe:0::2 20 | rightnet: 21 | ipv4_address: 193.167.100.2 22 | ipv6_address: fd00:cafe:cafe:100::2 23 | extra_hosts: 24 | - "server:193.167.100.100" 25 | 26 | server: 27 | image: $SERVER 28 | container_name: server 29 | hostname: server 30 | stdin_open: true 31 | tty: true 32 | volumes: 33 | - $WWW:/www:ro 34 | - $CERTS:/certs:ro 35 | environment: 36 | - CRON=$CRON 37 | - ROLE=server 38 | - SERVER_PARAMS=$SERVER_PARAMS 39 | - SSLKEYLOGFILE=/logs/keys.log 40 | - QLOGDIR=/logs/qlog/ 41 | - TESTCASE=$TESTCASE_SERVER 42 | depends_on: 43 | - sim 44 | cap_add: 45 | - NET_ADMIN 46 | ulimits: 47 | memlock: 67108864 48 | networks: 49 | rightnet: 50 | ipv4_address: 193.167.100.100 51 | ipv6_address: fd00:cafe:cafe:100::100 52 | extra_hosts: 53 | - "server4:193.167.100.100" 54 | - "server6:fd00:cafe:cafe:100::100" 55 | 56 | client: 57 | image: $CLIENT 58 | container_name: client 59 | hostname: client 60 | stdin_open: true 61 | tty: true 62 | volumes: 63 | - $DOWNLOADS:/downloads:delegated 64 | - $CERTS:/certs:ro 65 | environment: 66 | - CRON=$CRON 67 | - ROLE=client 68 | - CLIENT_PARAMS=$CLIENT_PARAMS 69 | - SSLKEYLOGFILE=/logs/keys.log 70 | - QLOGDIR=/logs/qlog/ 71 | - TESTCASE=$TESTCASE_CLIENT 72 | - REQUESTS=$REQUESTS 73 | depends_on: 74 | - sim 75 | cap_add: 76 | - NET_ADMIN 77 | ulimits: 78 | memlock: 67108864 79 | networks: 80 | leftnet: 81 | ipv4_address: 193.167.0.100 82 | ipv6_address: fd00:cafe:cafe:0::100 83 | extra_hosts: 84 | - "server4:193.167.100.100" 85 | - "server6:fd00:cafe:cafe:100::100" 86 | - "server46:193.167.100.100" 87 | - "server46:fd00:cafe:cafe:100::100" 88 | 89 | iperf_server: 90 | image: martenseemann/quic-interop-iperf-endpoint 91 | container_name: iperf_server 92 | stdin_open: true 93 | tty: true 94 | environment: 95 | - ROLE=server 96 | - CLIENT=client4 97 | - IPERF_CONGESTION=$IPERF_CONGESTION 98 | depends_on: 99 | - sim 100 | cap_add: 101 | - NET_ADMIN 102 | networks: 103 | rightnet: 104 | ipv4_address: 193.167.100.110 105 | ipv6_address: fd00:cafe:cafe:100::110 106 | extra_hosts: 107 | - "client4:193.167.0.90" 108 | - "client6:fd00:cafe:cafe:0::100" 109 | - "client46:193.167.0.90" 110 | - "client46:fd00:cafe:cafe:0::100" 111 | 112 | iperf_client: 113 | image: martenseemann/quic-interop-iperf-endpoint 114 | container_name: iperf_client 115 | stdin_open: true 116 | tty: true 117 | environment: 118 | - ROLE=client 119 | - IPERF_CONGESTION=$IPERF_CONGESTION 120 | depends_on: 121 | - sim 122 | cap_add: 123 | - NET_ADMIN 124 | networks: 125 | leftnet: 126 | ipv4_address: 193.167.0.90 127 | ipv6_address: fd00:cafe:cafe:0::90 128 | extra_hosts: 129 | - "server4:193.167.100.110" 130 | - "server6:fd00:cafe:cafe:100::110" 131 | - "server46:193.167.100.110" 132 | - "server46:fd00:cafe:cafe:100::110" 133 | 134 | networks: 135 | leftnet: 136 | driver: bridge 137 | driver_opts: 138 | com.docker.network.bridge.enable_ip_masquerade: 'false' 139 | enable_ipv6: true 140 | ipam: 141 | config: 142 | - subnet: 193.167.0.0/24 143 | - subnet: fd00:cafe:cafe:0::/64 144 | rightnet: 145 | driver: bridge 146 | driver_opts: 147 | com.docker.network.bridge.enable_ip_masquerade: 'false' 148 | enable_ipv6: true 149 | ipam: 150 | config: 151 | - subnet: 193.167.100.0/24 152 | - subnet: fd00:cafe:cafe:100::/64 153 | 154 | 
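# Note: the services above are parameterized entirely through environment
# variables (SERVER, CLIENT, WWW, DOWNLOADS, CERTS, TESTCASE_SERVER,
# TESTCASE_CLIENT, SCENARIO, REQUESTS, ...), which run.py / interop.py set
# for each test case. As a rough, hypothetical illustration (the image,
# paths, and request URL below are examples, not values the runner
# hard-codes), a manual invocation of a handshake test could look like:
#
#   SERVER=martenseemann/quic-go-interop:latest \
#   CLIENT=martenseemann/quic-go-interop:latest \
#   WWW=/tmp/www DOWNLOADS=/tmp/downloads CERTS=/tmp/certs \
#   TESTCASE_SERVER=handshake TESTCASE_CLIENT=handshake \
#   REQUESTS="https://server4:443/file.bin" \
#   SCENARIO="simple-p2p --delay=15ms --bandwidth=10Mbps --queue=25" \
#   docker compose --env-file empty.env up sim server client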
-------------------------------------------------------------------------------- /empty.env: -------------------------------------------------------------------------------- 1 | CLIENT_PARAMS="" 2 | CERTS="" 3 | CRON="" 4 | DOWNLOADS="" 5 | IPERF_CONGESTION="" 6 | QLOGDIR="" 7 | REQUESTS="" 8 | SCENARIO="" 9 | SERVER_PARAMS="" 10 | SSLKEYLOGFILE="" 11 | TESTCASE_CLIENT="" 12 | TESTCASE_SERVER="" 13 | WWW="" 14 | WAITFORSERVER="" 15 | -------------------------------------------------------------------------------- /implementations.json: -------------------------------------------------------------------------------- 1 | { 2 | "quic-go": { 3 | "image": "martenseemann/quic-go-interop:latest", 4 | "url": "https://github.com/quic-go/quic-go", 5 | "role": "both" 6 | }, 7 | "ngtcp2": { 8 | "image": "ghcr.io/ngtcp2/ngtcp2-interop:latest", 9 | "url": "https://github.com/ngtcp2/ngtcp2", 10 | "role": "both" 11 | }, 12 | "mvfst": { 13 | "image": "ghcr.io/facebook/proxygen/mvfst-interop:latest", 14 | "url": "https://github.com/facebookincubator/mvfst", 15 | "role": "both" 16 | }, 17 | "quiche": { 18 | "image": "cloudflare/quiche-qns:latest", 19 | "url": "https://github.com/cloudflare/quiche", 20 | "role": "both" 21 | }, 22 | "kwik": { 23 | "image": "peterdoornbosch/kwik_n_flupke-interop", 24 | "url": "https://github.com/ptrd/kwik", 25 | "role": "both" 26 | }, 27 | "picoquic": { 28 | "image": "privateoctopus/picoquic:latest", 29 | "url": "https://github.com/private-octopus/picoquic", 30 | "role": "both" 31 | }, 32 | "aioquic": { 33 | "image": "aiortc/aioquic-qns:latest", 34 | "url": "https://github.com/aiortc/aioquic", 35 | "role": "both" 36 | }, 37 | "neqo": { 38 | "image": "ghcr.io/mozilla/neqo-qns:latest", 39 | "url": "https://github.com/mozilla/neqo", 40 | "role": "both" 41 | }, 42 | "nginx": { 43 | "image": "ghcr.io/nginx/nginx-quic-qns:latest", 44 | "url": "https://quic.nginx.org/", 45 | "role": "server" 46 | }, 47 | "msquic": { 48 | "image": "ghcr.io/microsoft/msquic/qns:main", 49 | "url": "https://github.com/microsoft/msquic", 50 | "role": "both" 51 | }, 52 | "chrome": { 53 | "image": "martenseemann/chrome-quic-interop-runner", 54 | "url": "https://github.com/quic-interop/chrome-quic-interop-runner", 55 | "role": "client" 56 | }, 57 | "xquic": { 58 | "image": "ghcr.io/alibaba/xquic/xquic-interop:latest", 59 | "url": "https://github.com/alibaba/xquic", 60 | "role": "both" 61 | }, 62 | "lsquic": { 63 | "image": "litespeedtech/lsquic-qir:latest", 64 | "url": "https://github.com/litespeedtech/lsquic", 65 | "role": "both" 66 | }, 67 | "haproxy": { 68 | "image": "haproxytech/haproxy-qns:latest", 69 | "url": "https://github.com/haproxy/haproxy", 70 | "role": "server" 71 | }, 72 | "quinn": { 73 | "image": "stammw/quinn-interop:latest", 74 | "url": "https://github.com/quinn-rs/quinn", 75 | "role": "both" 76 | }, 77 | "s2n-quic": { 78 | "image": "ghcr.io/aws/s2n-quic/s2n-quic-qns:latest", 79 | "url": "https://github.com/aws/s2n-quic", 80 | "role": "both" 81 | }, 82 | "go-x-net": { 83 | "image": "us-central1-docker.pkg.dev/golang-interop-testing/quic/go-x-net:latest", 84 | "url": "https://pkg.go.dev/golang.org/x/net/internal/quic", 85 | "role": "both" 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /implementations.py: -------------------------------------------------------------------------------- 1 | import json 2 | from enum import Enum 3 | 4 | IMPLEMENTATIONS = {} 5 | 6 | 7 | class Role(Enum): 8 | BOTH = "both" 9 | SERVER = "server" 10 | CLIENT = 
"client" 11 | 12 | 13 | with open("implementations.json", "r") as f: 14 | data = json.load(f) 15 | for name, val in data.items(): 16 | IMPLEMENTATIONS[name] = {"image": val["image"], "url": val["url"]} 17 | role = val["role"] 18 | if role == "server": 19 | IMPLEMENTATIONS[name]["role"] = Role.SERVER 20 | elif role == "client": 21 | IMPLEMENTATIONS[name]["role"] = Role.CLIENT 22 | elif role == "both": 23 | IMPLEMENTATIONS[name]["role"] = Role.BOTH 24 | else: 25 | raise Exception("unknown role: " + role) 26 | -------------------------------------------------------------------------------- /interop.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import random 5 | import re 6 | import shutil 7 | import statistics 8 | import string 9 | import subprocess 10 | import sys 11 | import tempfile 12 | from datetime import datetime 13 | from typing import Callable, List, Tuple 14 | 15 | import prettytable 16 | from termcolor import colored 17 | 18 | import testcases 19 | from result import TestResult 20 | from testcases import Perspective 21 | 22 | 23 | def random_string(length: int): 24 | """Generate a random string of fixed length""" 25 | letters = string.ascii_lowercase 26 | return "".join(random.choice(letters) for i in range(length)) 27 | 28 | 29 | class MeasurementResult: 30 | result = TestResult 31 | details = str 32 | 33 | 34 | class LogFileFormatter(logging.Formatter): 35 | def format(self, record): 36 | msg = super(LogFileFormatter, self).format(record) 37 | # remove color control characters 38 | return re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]").sub("", msg) 39 | 40 | 41 | class InteropRunner: 42 | _start_time = 0 43 | test_results = {} 44 | measurement_results = {} 45 | compliant = {} 46 | _implementations = {} 47 | _client_server_pairs = [] 48 | _tests = [] 49 | _measurements = [] 50 | _output = "" 51 | _markdown = False 52 | _log_dir = "" 53 | _save_files = False 54 | _no_auto_unsupported = [] 55 | 56 | def __init__( 57 | self, 58 | implementations: dict, 59 | client_server_pairs: List[Tuple[str, str]], 60 | tests: List[testcases.TestCase], 61 | measurements: List[testcases.Measurement], 62 | output: str, 63 | markdown: bool, 64 | debug: bool, 65 | save_files=False, 66 | log_dir="", 67 | no_auto_unsupported=[], 68 | ): 69 | logger = logging.getLogger() 70 | logger.setLevel(logging.DEBUG) 71 | console = logging.StreamHandler(stream=sys.stderr) 72 | if debug: 73 | console.setLevel(logging.DEBUG) 74 | else: 75 | console.setLevel(logging.INFO) 76 | logger.addHandler(console) 77 | self._start_time = datetime.now() 78 | self._tests = tests 79 | self._measurements = measurements 80 | self._client_server_pairs = client_server_pairs 81 | self._implementations = implementations 82 | self._output = output 83 | self._markdown = markdown 84 | self._log_dir = log_dir 85 | self._save_files = save_files 86 | self._no_auto_unsupported = no_auto_unsupported 87 | if len(self._log_dir) == 0: 88 | self._log_dir = "logs_{:%Y-%m-%dT%H:%M:%S}".format(self._start_time) 89 | if os.path.exists(self._log_dir): 90 | sys.exit("Log dir " + self._log_dir + " already exists.") 91 | logging.info("Saving logs to %s.", self._log_dir) 92 | for client, server in client_server_pairs: 93 | for test in self._tests: 94 | self.test_results.setdefault(server, {}).setdefault( 95 | client, {} 96 | ).setdefault(test, {}) 97 | for measurement in measurements: 98 | self.measurement_results.setdefault(server, {}).setdefault( 99 | client, {} 100 | 
).setdefault(measurement, {}) 101 | 102 | def _is_unsupported(self, lines: List[str]) -> bool: 103 | return any("exited with code 127" in str(line) for line in lines) or any( 104 | "exit status 127" in str(line) for line in lines 105 | ) 106 | 107 | def _check_impl_is_compliant(self, name: str) -> bool: 108 | """check if an implementation return UNSUPPORTED for unknown test cases""" 109 | if name in self.compliant: 110 | logging.debug( 111 | "%s already tested for compliance: %s", name, str(self.compliant) 112 | ) 113 | return self.compliant[name] 114 | 115 | client_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_client_") 116 | www_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="compliance_www_") 117 | certs_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="compliance_certs_") 118 | downloads_dir = tempfile.TemporaryDirectory( 119 | dir="/tmp", prefix="compliance_downloads_" 120 | ) 121 | 122 | testcases.generate_cert_chain(certs_dir.name) 123 | 124 | # check that the client is capable of returning UNSUPPORTED 125 | logging.debug("Checking compliance of %s client", name) 126 | cmd = ( 127 | "CERTS=" + certs_dir.name + " " 128 | "TESTCASE_CLIENT=" + random_string(6) + " " 129 | "SERVER_LOGS=/dev/null " 130 | "CLIENT_LOGS=" + client_log_dir.name + " " 131 | "WWW=" + www_dir.name + " " 132 | "DOWNLOADS=" + downloads_dir.name + " " 133 | 'SCENARIO="simple-p2p --delay=15ms --bandwidth=10Mbps --queue=25" ' 134 | "CLIENT=" + self._implementations[name]["image"] + " " 135 | "SERVER=" 136 | + self._implementations[name]["image"] 137 | + " " # only needed so docker compose doesn't complain 138 | "docker compose --env-file empty.env up --timeout 0 --abort-on-container-exit -V sim client" 139 | ) 140 | output = subprocess.run( 141 | cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT 142 | ) 143 | if not self._is_unsupported(output.stdout.splitlines()): 144 | logging.error("%s client not compliant.", name) 145 | logging.debug("%s", output.stdout.decode("utf-8", errors="replace")) 146 | self.compliant[name] = False 147 | return False 148 | logging.debug("%s client compliant.", name) 149 | 150 | # check that the server is capable of returning UNSUPPORTED 151 | logging.debug("Checking compliance of %s server", name) 152 | server_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_server_") 153 | cmd = ( 154 | "CERTS=" + certs_dir.name + " " 155 | "TESTCASE_SERVER=" + random_string(6) + " " 156 | "SERVER_LOGS=" + server_log_dir.name + " " 157 | "CLIENT_LOGS=/dev/null " 158 | "WWW=" + www_dir.name + " " 159 | "DOWNLOADS=" + downloads_dir.name + " " 160 | "CLIENT=" 161 | + self._implementations[name]["image"] 162 | + " " # only needed so docker compose doesn't complain 163 | "SERVER=" + self._implementations[name]["image"] + " " 164 | "docker compose --env-file empty.env up -V server" 165 | ) 166 | output = subprocess.run( 167 | cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT 168 | ) 169 | if not self._is_unsupported(output.stdout.splitlines()): 170 | logging.error("%s server not compliant.", name) 171 | logging.debug("%s", output.stdout.decode("utf-8", errors="replace")) 172 | self.compliant[name] = False 173 | return False 174 | logging.debug("%s server compliant.", name) 175 | 176 | # remember compliance test outcome 177 | self.compliant[name] = True 178 | return True 179 | 180 | def _postprocess_results(self): 181 | clients = list(set(client for client, _ in self._client_server_pairs)) 182 | servers = list(set(server for _, server in 
self._client_server_pairs)) 183 | questionable = [TestResult.FAILED, TestResult.UNSUPPORTED] 184 | # If a client failed a test against all servers, make the test unsupported for the client 185 | if len(servers) > 1: 186 | for c in set(clients) - set(self._no_auto_unsupported): 187 | for t in self._tests: 188 | if all(self.test_results[s][c][t] in questionable for s in servers): 189 | print( 190 | f"Client {c} failed or did not support test {t.name()} " 191 | + 'against all servers, marking the entire test as "unsupported"' 192 | ) 193 | for s in servers: 194 | self.test_results[s][c][t] = TestResult.UNSUPPORTED 195 | # If a server failed a test against all clients, make the test unsupported for the server 196 | if len(clients) > 1: 197 | for s in set(servers) - set(self._no_auto_unsupported): 198 | for t in self._tests: 199 | if all(self.test_results[s][c][t] in questionable for c in clients): 200 | print( 201 | f"Server {s} failed or did not support test {t.name()} " 202 | + 'against all clients, marking the entire test as "unsupported"' 203 | ) 204 | for c in clients: 205 | self.test_results[s][c][t] = TestResult.UNSUPPORTED 206 | 207 | def _print_results(self): 208 | """print the interop table""" 209 | logging.info("Run took %s", datetime.now() - self._start_time) 210 | 211 | def get_letters(result): 212 | return ( 213 | result.symbol() 214 | + "(" 215 | + ",".join( 216 | [test.abbreviation() for test in cell if cell[test] is result] 217 | ) 218 | + ")" 219 | ) 220 | 221 | if len(self._tests) > 0: 222 | t = prettytable.PrettyTable() 223 | if self._markdown: 224 | t.set_style(prettytable.MARKDOWN) 225 | else: 226 | t.hrules = prettytable.ALL 227 | t.vrules = prettytable.ALL 228 | rows = {} 229 | columns = {} 230 | for client, server in self._client_server_pairs: 231 | columns[server] = {} 232 | row = rows.setdefault(client, {}) 233 | cell = self.test_results[server][client] 234 | br = "
" if self._markdown else "\n" 235 | res = colored(get_letters(TestResult.SUCCEEDED), "green") + br 236 | res += colored(get_letters(TestResult.UNSUPPORTED), "grey") + br 237 | res += colored(get_letters(TestResult.FAILED), "red") 238 | row[server] = res 239 | 240 | t.field_names = [""] + [column for column, _ in columns.items()] 241 | for client, results in rows.items(): 242 | row = [client] 243 | for server, _ in columns.items(): 244 | row += [results.setdefault(server, "")] 245 | t.add_row(row) 246 | print(t) 247 | 248 | if len(self._measurements) > 0: 249 | t = prettytable.PrettyTable() 250 | if self._markdown: 251 | t.set_style(prettytable.MARKDOWN) 252 | else: 253 | t.hrules = prettytable.ALL 254 | t.vrules = prettytable.ALL 255 | rows = {} 256 | columns = {} 257 | for client, server in self._client_server_pairs: 258 | columns[server] = {} 259 | row = rows.setdefault(client, {}) 260 | cell = self.measurement_results[server][client] 261 | results = [] 262 | for measurement in self._measurements: 263 | res = cell[measurement] 264 | if not hasattr(res, "result"): 265 | continue 266 | if res.result == TestResult.SUCCEEDED: 267 | results.append( 268 | colored( 269 | measurement.abbreviation() + ": " + res.details, 270 | "green", 271 | ) 272 | ) 273 | elif res.result == TestResult.UNSUPPORTED: 274 | results.append(colored(measurement.abbreviation(), "grey")) 275 | elif res.result == TestResult.FAILED: 276 | results.append(colored(measurement.abbreviation(), "red")) 277 | row[server] = "\n".join(results) 278 | t.field_names = [""] + [column for column, _ in columns.items()] 279 | for client, results in rows.items(): 280 | row = [client] 281 | for server, _ in columns.items(): 282 | row += [results.setdefault(server, "")] 283 | t.add_row(row) 284 | print(t) 285 | 286 | def _export_results(self): 287 | if not self._output: 288 | return 289 | clients = list(set(client for client, _ in self._client_server_pairs)) 290 | servers = list(set(server for _, server in self._client_server_pairs)) 291 | out = { 292 | "start_time": self._start_time.timestamp(), 293 | "end_time": datetime.now().timestamp(), 294 | "log_dir": self._log_dir, 295 | "servers": servers, 296 | "clients": clients, 297 | "urls": {x: self._implementations[x]["url"] for x in clients + servers}, 298 | "tests": { 299 | x.abbreviation(): { 300 | "name": x.name(), 301 | "desc": x.desc(), 302 | } 303 | for x in self._tests + self._measurements 304 | }, 305 | "quic_draft": testcases.QUIC_DRAFT, 306 | "quic_version": testcases.QUIC_VERSION, 307 | "results": [], 308 | "measurements": [], 309 | } 310 | 311 | for client in clients: 312 | for server in servers: 313 | results = [] 314 | for test in self._tests: 315 | r = None 316 | if hasattr(self.test_results[server][client][test], "value"): 317 | r = self.test_results[server][client][test].value 318 | results.append( 319 | { 320 | "abbr": test.abbreviation(), 321 | "name": test.name(), # TODO: remove 322 | "result": r, 323 | } 324 | ) 325 | out["results"].append(results) 326 | 327 | measurements = [] 328 | for measurement in self._measurements: 329 | res = self.measurement_results[server][client][measurement] 330 | if not hasattr(res, "result"): 331 | continue 332 | measurements.append( 333 | { 334 | "name": measurement.name(), # TODO: remove 335 | "abbr": measurement.abbreviation(), 336 | "result": res.result.value, 337 | "details": res.details, 338 | } 339 | ) 340 | out["measurements"].append(measurements) 341 | 342 | f = open(self._output, "w") 343 | json.dump(out, f) 344 | f.close() 345 | 
346 | def _copy_logs(self, container: str, dir: tempfile.TemporaryDirectory): 347 | cmd = ( 348 | "docker cp \"$(docker ps -a --format '{{.ID}} {{.Names}}' | awk '/^.* " 349 | + container 350 | + "$/ {print $1}')\":/logs/. " 351 | + dir.name 352 | ) 353 | r = subprocess.run( 354 | cmd, 355 | shell=True, 356 | stdout=subprocess.PIPE, 357 | stderr=subprocess.STDOUT, 358 | ) 359 | if r.returncode != 0: 360 | logging.info( 361 | "Copying logs from %s failed: %s", 362 | container, 363 | r.stdout.decode("utf-8", errors="replace"), 364 | ) 365 | 366 | def _run_testcase( 367 | self, server: str, client: str, test: Callable[[], testcases.TestCase] 368 | ) -> TestResult: 369 | return self._run_test(server, client, None, test)[0] 370 | 371 | def _run_test( 372 | self, 373 | server: str, 374 | client: str, 375 | log_dir_prefix: None, 376 | test: Callable[[], testcases.TestCase], 377 | ) -> Tuple[TestResult, float]: 378 | start_time = datetime.now() 379 | sim_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_sim_") 380 | server_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_server_") 381 | client_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_client_") 382 | log_file = tempfile.NamedTemporaryFile(dir="/tmp", prefix="output_log_") 383 | log_handler = logging.FileHandler(log_file.name) 384 | log_handler.setLevel(logging.DEBUG) 385 | 386 | formatter = LogFileFormatter("%(asctime)s %(message)s") 387 | log_handler.setFormatter(formatter) 388 | logging.getLogger().addHandler(log_handler) 389 | 390 | testcase = test( 391 | sim_log_dir=sim_log_dir, 392 | client_keylog_file=client_log_dir.name + "/keys.log", 393 | server_keylog_file=server_log_dir.name + "/keys.log", 394 | ) 395 | print( 396 | "Server: " 397 | + server 398 | + ". Client: " 399 | + client 400 | + ". 
Running test case: " 401 | + str(testcase) 402 | ) 403 | 404 | reqs = " ".join([testcase.urlprefix() + p for p in testcase.get_paths()]) 405 | logging.debug("Requests: %s", reqs) 406 | params = ( 407 | "WAITFORSERVER=server:443 " 408 | "CERTS=" + testcase.certs_dir() + " " 409 | "TESTCASE_SERVER=" + testcase.testname(Perspective.SERVER) + " " 410 | "TESTCASE_CLIENT=" + testcase.testname(Perspective.CLIENT) + " " 411 | "WWW=" + testcase.www_dir() + " " 412 | "DOWNLOADS=" + testcase.download_dir() + " " 413 | "SERVER_LOGS=" + server_log_dir.name + " " 414 | "CLIENT_LOGS=" + client_log_dir.name + " " 415 | 'SCENARIO="{}" ' 416 | "CLIENT=" + self._implementations[client]["image"] + " " 417 | "SERVER=" + self._implementations[server]["image"] + " " 418 | 'REQUESTS="' + reqs + '" ' 419 | ).format(testcase.scenario()) 420 | params += " ".join(testcase.additional_envs()) 421 | containers = "sim client server " + " ".join(testcase.additional_containers()) 422 | cmd = ( 423 | params 424 | + " docker compose --env-file empty.env up --abort-on-container-exit --timeout 1 " 425 | + containers 426 | ) 427 | logging.debug("Command: %s", cmd) 428 | 429 | status = TestResult.FAILED 430 | output = "" 431 | expired = False 432 | try: 433 | r = subprocess.run( 434 | cmd, 435 | shell=True, 436 | stdout=subprocess.PIPE, 437 | stderr=subprocess.STDOUT, 438 | timeout=testcase.timeout(), 439 | ) 440 | output = r.stdout 441 | except subprocess.TimeoutExpired as ex: 442 | output = ex.stdout 443 | expired = True 444 | 445 | logging.debug("%s", output.decode("utf-8", errors="replace")) 446 | 447 | if expired: 448 | logging.debug("Test failed: took longer than %ds.", testcase.timeout()) 449 | r = subprocess.run( 450 | "docker compose --env-file empty.env stop " + containers, 451 | shell=True, 452 | stdout=subprocess.PIPE, 453 | stderr=subprocess.STDOUT, 454 | timeout=60, 455 | ) 456 | logging.debug("%s", r.stdout.decode("utf-8", errors="replace")) 457 | 458 | # copy the pcaps from the simulator 459 | self._copy_logs("sim", sim_log_dir) 460 | self._copy_logs("client", client_log_dir) 461 | self._copy_logs("server", server_log_dir) 462 | 463 | if not expired: 464 | lines = output.splitlines() 465 | if self._is_unsupported(lines): 466 | status = TestResult.UNSUPPORTED 467 | elif any("client exited with code 0" in str(line) for line in lines): 468 | try: 469 | status = testcase.check() 470 | except FileNotFoundError as e: 471 | logging.error(f"testcase.check() threw FileNotFoundError: {e}") 472 | status = TestResult.FAILED 473 | 474 | # save logs 475 | logging.getLogger().removeHandler(log_handler) 476 | log_handler.close() 477 | if status == TestResult.FAILED or status == TestResult.SUCCEEDED: 478 | log_dir = self._log_dir + "/" + server + "_" + client + "/" + str(testcase) 479 | if log_dir_prefix: 480 | log_dir += "/" + log_dir_prefix 481 | shutil.copytree(server_log_dir.name, log_dir + "/server") 482 | shutil.copytree(client_log_dir.name, log_dir + "/client") 483 | shutil.copytree(sim_log_dir.name, log_dir + "/sim") 484 | shutil.copyfile(log_file.name, log_dir + "/output.txt") 485 | if self._save_files and status == TestResult.FAILED: 486 | shutil.copytree(testcase.www_dir(), log_dir + "/www") 487 | try: 488 | shutil.copytree(testcase.download_dir(), log_dir + "/downloads") 489 | except Exception as exception: 490 | logging.info("Could not copy downloaded files: %s", exception) 491 | 492 | testcase.cleanup() 493 | server_log_dir.cleanup() 494 | client_log_dir.cleanup() 495 | sim_log_dir.cleanup() 496 | logging.debug( 497 
| "Test: %s took %ss, status: %s", 498 | str(testcase), 499 | (datetime.now() - start_time).total_seconds(), 500 | str(status), 501 | ) 502 | 503 | # measurements also have a value 504 | if hasattr(testcase, "result"): 505 | value = testcase.result() 506 | else: 507 | value = None 508 | 509 | return status, value 510 | 511 | def _run_measurement( 512 | self, server: str, client: str, test: Callable[[], testcases.Measurement] 513 | ) -> MeasurementResult: 514 | values = [] 515 | for i in range(0, test.repetitions()): 516 | result, value = self._run_test(server, client, "%d" % (i + 1), test) 517 | if result != TestResult.SUCCEEDED: 518 | res = MeasurementResult() 519 | res.result = result 520 | res.details = "" 521 | return res 522 | values.append(value) 523 | 524 | logging.debug(values) 525 | res = MeasurementResult() 526 | res.result = TestResult.SUCCEEDED 527 | res.details = "{:.0f} (± {:.0f}) {}".format( 528 | statistics.mean(values), statistics.stdev(values), test.unit() 529 | ) 530 | return res 531 | 532 | def run(self): 533 | """run the interop test suite and output the table""" 534 | 535 | nr_failed = 0 536 | for client, server in self._client_server_pairs: 537 | logging.debug( 538 | "Running with server %s (%s) and client %s (%s)", 539 | server, 540 | self._implementations[server]["image"], 541 | client, 542 | self._implementations[client]["image"], 543 | ) 544 | if not ( 545 | self._check_impl_is_compliant(server) 546 | and self._check_impl_is_compliant(client) 547 | ): 548 | logging.info("Not compliant, skipping") 549 | continue 550 | 551 | # run the test cases 552 | for testcase in self._tests: 553 | status = self._run_testcase(server, client, testcase) 554 | self.test_results[server][client][testcase] = status 555 | if status == TestResult.FAILED: 556 | nr_failed += 1 557 | 558 | # run the measurements 559 | for measurement in self._measurements: 560 | res = self._run_measurement(server, client, measurement) 561 | self.measurement_results[server][client][measurement] = res 562 | 563 | self._postprocess_results() 564 | self._print_results() 565 | self._export_results() 566 | return nr_failed 567 | -------------------------------------------------------------------------------- /pull.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import sys 4 | 5 | from implementations import IMPLEMENTATIONS 6 | 7 | print("Pulling the simulator...") 8 | os.system("docker pull martenseemann/quic-network-simulator") 9 | 10 | print("\nPulling the iperf endpoint...") 11 | os.system("docker pull martenseemann/quic-interop-iperf-endpoint") 12 | 13 | 14 | def get_args(): 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("-i", "--implementations", help="implementations to pull") 17 | return parser.parse_args() 18 | 19 | 20 | implementations = {} 21 | if get_args().implementations: 22 | for s in get_args().implementations.split(","): 23 | if s not in [n for n, _ in IMPLEMENTATIONS.items()]: 24 | sys.exit("implementation " + s + " not found.") 25 | implementations[s] = IMPLEMENTATIONS[s] 26 | else: 27 | implementations = IMPLEMENTATIONS 28 | 29 | for name, value in implementations.items(): 30 | print("\nPulling " + name + "...") 31 | os.system("docker pull " + value["image"]) 32 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pycryptodome 2 | termcolor 3 | prettytable 4 | pyshark 
-------------------------------------------------------------------------------- /result.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class TestResult(Enum): 5 | SUCCEEDED = "succeeded" 6 | FAILED = "failed" 7 | UNSUPPORTED = "unsupported" 8 | 9 | def symbol(self): 10 | if self == TestResult.SUCCEEDED: 11 | return "✓" 12 | elif self == TestResult.FAILED: 13 | return "✕" 14 | elif self == TestResult.UNSUPPORTED: 15 | return "?" 16 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import sys 5 | from typing import List, Tuple 6 | 7 | import testcases 8 | from implementations import IMPLEMENTATIONS, Role 9 | from interop import InteropRunner 10 | from testcases import MEASUREMENTS, TESTCASES 11 | 12 | implementations = { 13 | name: {"image": value["image"], "url": value["url"]} 14 | for name, value in IMPLEMENTATIONS.items() 15 | } 16 | client_implementations = [ 17 | name 18 | for name, value in IMPLEMENTATIONS.items() 19 | if value["role"] == Role.BOTH or value["role"] == Role.CLIENT 20 | ] 21 | server_implementations = [ 22 | name 23 | for name, value in IMPLEMENTATIONS.items() 24 | if value["role"] == Role.BOTH or value["role"] == Role.SERVER 25 | ] 26 | 27 | 28 | def main(): 29 | def get_args(): 30 | parser = argparse.ArgumentParser() 31 | parser.add_argument( 32 | "-d", 33 | "--debug", 34 | action="store_const", 35 | const=True, 36 | default=False, 37 | help="turn on debug logs", 38 | ) 39 | parser.add_argument( 40 | "-s", "--server", help="server implementations (comma-separated)" 41 | ) 42 | parser.add_argument( 43 | "-c", "--client", help="client implementations (comma-separated)" 44 | ) 45 | parser.add_argument( 46 | "-t", 47 | "--test", 48 | help="test cases (comma-separatated). Valid test cases are: " 49 | + ", ".join([x.name() for x in TESTCASES + MEASUREMENTS]), 50 | ) 51 | parser.add_argument( 52 | "-r", 53 | "--replace", 54 | help="replace path of implementation. 
Example: -r myquicimpl=dockertagname", 55 | ) 56 | parser.add_argument( 57 | "-l", 58 | "--log-dir", 59 | help="log directory", 60 | default="", 61 | ) 62 | parser.add_argument( 63 | "-f", "--save-files", help="save downloaded files if a test fails" 64 | ) 65 | parser.add_argument( 66 | "-j", "--json", help="output the matrix to file in json format" 67 | ) 68 | parser.add_argument( 69 | "-m", 70 | "--markdown", 71 | help="output the matrix in Markdown format", 72 | action="store_const", 73 | const=True, 74 | default=False, 75 | ) 76 | parser.add_argument( 77 | "-i", 78 | "--must-include", 79 | help="implementation that must be included", 80 | ) 81 | parser.add_argument( 82 | "-n", 83 | "--no-auto-unsupported", 84 | help="implementations for which auto-marking as unsupported when all tests fail should be skipped", 85 | ) 86 | return parser.parse_args() 87 | 88 | replace_arg = get_args().replace 89 | if replace_arg: 90 | for s in replace_arg.split(","): 91 | pair = s.split("=") 92 | if len(pair) != 2: 93 | sys.exit("Invalid format for replace") 94 | name, image = pair[0], pair[1] 95 | if name not in IMPLEMENTATIONS: 96 | sys.exit("Implementation " + name + " not found.") 97 | implementations[name]["image"] = image 98 | 99 | def get_impls(arg, availableImpls, role) -> List[str]: 100 | if not arg: 101 | return availableImpls 102 | impls = [] 103 | for s in arg.split(","): 104 | if s not in availableImpls: 105 | sys.exit(role + " implementation " + s + " not found.") 106 | impls.append(s) 107 | return impls 108 | 109 | def get_impl_pairs(clients, servers, must_include) -> List[Tuple[str, str]]: 110 | impls = [] 111 | for client in clients: 112 | for server in servers: 113 | if ( 114 | must_include is None 115 | or client == must_include 116 | or server == must_include 117 | ): 118 | impls.append((client, server)) 119 | return impls 120 | 121 | def get_tests_and_measurements( 122 | arg, 123 | ) -> Tuple[List[testcases.TestCase], List[testcases.TestCase]]: 124 | if arg is None: 125 | return TESTCASES, MEASUREMENTS 126 | elif arg == "onlyTests": 127 | return TESTCASES, [] 128 | elif arg == "onlyMeasurements": 129 | return [], MEASUREMENTS 130 | elif not arg: 131 | return [] 132 | tests = [] 133 | measurements = [] 134 | for t in arg.split(","): 135 | if t in [tc.name() for tc in TESTCASES]: 136 | tests += [tc for tc in TESTCASES if tc.name() == t] 137 | elif t in [tc.name() for tc in MEASUREMENTS]: 138 | measurements += [tc for tc in MEASUREMENTS if tc.name() == t] 139 | else: 140 | print( 141 | ( 142 | "Test case {} not found.\n" 143 | "Available testcases: {}\n" 144 | "Available measurements: {}" 145 | ).format( 146 | t, 147 | ", ".join([t.name() for t in TESTCASES]), 148 | ", ".join([t.name() for t in MEASUREMENTS]), 149 | ) 150 | ) 151 | sys.exit() 152 | return tests, measurements 153 | 154 | t = get_tests_and_measurements(get_args().test) 155 | clients = get_impls(get_args().client, client_implementations, "Client") 156 | servers = get_impls(get_args().server, server_implementations, "Server") 157 | # If there is only one client or server, we should not automatically mark tests as unsupported 158 | no_auto_unsupported = set() 159 | for kind in [clients, servers]: 160 | if len(kind) == 1: 161 | no_auto_unsupported.add(kind[0]) 162 | return InteropRunner( 163 | implementations=implementations, 164 | client_server_pairs=get_impl_pairs(clients, servers, get_args().must_include), 165 | tests=t[0], 166 | measurements=t[1], 167 | output=get_args().json, 168 | markdown=get_args().markdown, 169 | 
debug=get_args().debug, 170 | log_dir=get_args().log_dir, 171 | save_files=get_args().save_files, 172 | no_auto_unsupported=( 173 | no_auto_unsupported 174 | if get_args().no_auto_unsupported is None 175 | else get_impls( 176 | get_args().no_auto_unsupported, clients + servers, "Client/Server" 177 | ) 178 | ), 179 | ).run() 180 | 181 | 182 | if __name__ == "__main__": 183 | sys.exit(main()) 184 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore=E501,W503 3 | -------------------------------------------------------------------------------- /testcases.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import filecmp 3 | import logging 4 | import os 5 | import random 6 | import re 7 | import shutil 8 | import string 9 | import subprocess 10 | import sys 11 | import tempfile 12 | from datetime import timedelta 13 | from enum import Enum, IntEnum 14 | from trace import ( 15 | QUIC_V2, 16 | Direction, 17 | PacketType, 18 | TraceAnalyzer, 19 | get_direction, 20 | get_packet_type, 21 | ) 22 | from typing import List, Tuple 23 | 24 | from Crypto.Cipher import AES 25 | 26 | from result import TestResult 27 | 28 | KB = 1 << 10 29 | MB = 1 << 20 30 | 31 | QUIC_DRAFT = 34 # draft-34 32 | QUIC_VERSION = hex(0x1) 33 | 34 | 35 | class Perspective(Enum): 36 | SERVER = "server" 37 | CLIENT = "client" 38 | 39 | 40 | class ECN(IntEnum): 41 | NONE = 0 42 | ECT1 = 1 43 | ECT0 = 2 44 | CE = 3 45 | 46 | 47 | def random_string(length: int): 48 | """Generate a random string of fixed length""" 49 | letters = string.ascii_lowercase 50 | return "".join(random.choice(letters) for i in range(length)) 51 | 52 | 53 | def generate_cert_chain(directory: str, length: int = 1): 54 | cmd = "./certs.sh " + directory + " " + str(length) 55 | r = subprocess.run( 56 | cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT 57 | ) 58 | logging.debug("%s", r.stdout.decode("utf-8")) 59 | if r.returncode != 0: 60 | logging.info("Unable to create certificates") 61 | sys.exit(1) 62 | 63 | 64 | class TestCase(abc.ABC): 65 | _files = [] 66 | _www_dir = None 67 | _client_keylog_file = None 68 | _server_keylog_file = None 69 | _download_dir = None 70 | _sim_log_dir = None 71 | _cert_dir = None 72 | _cached_server_trace = None 73 | _cached_client_trace = None 74 | 75 | def __init__( 76 | self, 77 | sim_log_dir: tempfile.TemporaryDirectory, 78 | client_keylog_file: str, 79 | server_keylog_file: str, 80 | ): 81 | self._server_keylog_file = server_keylog_file 82 | self._client_keylog_file = client_keylog_file 83 | self._files = [] 84 | self._sim_log_dir = sim_log_dir 85 | 86 | @abc.abstractmethod 87 | def name(self): 88 | pass 89 | 90 | @abc.abstractmethod 91 | def desc(self): 92 | pass 93 | 94 | def __str__(self): 95 | return self.name() 96 | 97 | def testname(self, p: Perspective): 98 | """The name of testcase presented to the endpoint Docker images""" 99 | return self.name() 100 | 101 | @staticmethod 102 | def scenario() -> str: 103 | """Scenario for the ns3 simulator""" 104 | return "simple-p2p --delay=15ms --bandwidth=10Mbps --queue=25" 105 | 106 | @staticmethod 107 | def timeout() -> int: 108 | """timeout in s""" 109 | return 60 110 | 111 | @staticmethod 112 | def urlprefix() -> str: 113 | """URL prefix""" 114 | return "https://server4:443/" 115 | 116 | @staticmethod 117 | def additional_envs() -> List[str]: 118 | return [""] 119 | 
120 | @staticmethod 121 | def additional_containers() -> List[str]: 122 | return [""] 123 | 124 | def www_dir(self): 125 | if not self._www_dir: 126 | self._www_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="www_") 127 | return self._www_dir.name + "/" 128 | 129 | def download_dir(self): 130 | if not self._download_dir: 131 | self._download_dir = tempfile.TemporaryDirectory( 132 | dir="/tmp", prefix="download_" 133 | ) 134 | return self._download_dir.name + "/" 135 | 136 | def certs_dir(self): 137 | if not self._cert_dir: 138 | self._cert_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="certs_") 139 | generate_cert_chain(self._cert_dir.name) 140 | return self._cert_dir.name + "/" 141 | 142 | def _is_valid_keylog(self, filename) -> bool: 143 | if not os.path.isfile(filename) or os.path.getsize(filename) == 0: 144 | return False 145 | with open(filename, "r") as file: 146 | if not re.search( 147 | r"^SERVER_HANDSHAKE_TRAFFIC_SECRET", file.read(), re.MULTILINE 148 | ): 149 | logging.info("Key log file %s is using incorrect format.", filename) 150 | return False 151 | return True 152 | 153 | def _keylog_file(self) -> str: 154 | if self._is_valid_keylog(self._client_keylog_file): 155 | logging.debug("Using the client's key log file.") 156 | return self._client_keylog_file 157 | elif self._is_valid_keylog(self._server_keylog_file): 158 | logging.debug("Using the server's key log file.") 159 | return self._server_keylog_file 160 | logging.debug("No key log file found.") 161 | 162 | def _inject_keylog_if_possible(self, trace: str): 163 | """ 164 | Inject the keylog file into the pcap file if it is available and valid. 165 | """ 166 | keylog = self._keylog_file() 167 | if keylog is None: 168 | return 169 | 170 | with tempfile.NamedTemporaryFile() as tmp: 171 | r = subprocess.run( 172 | f"editcap --inject-secrets tls,{keylog} {trace} {tmp.name}", 173 | shell=True, 174 | stdout=subprocess.PIPE, 175 | stderr=subprocess.STDOUT, 176 | ) 177 | logging.debug("%s", r.stdout.decode("utf-8")) 178 | if r.returncode != 0: 179 | return 180 | shutil.copy(tmp.name, trace) 181 | 182 | def _client_trace(self): 183 | if self._cached_client_trace is None: 184 | trace = self._sim_log_dir.name + "/trace_node_left.pcap" 185 | self._inject_keylog_if_possible(trace) 186 | self._cached_client_trace = TraceAnalyzer(trace, self._keylog_file()) 187 | return self._cached_client_trace 188 | 189 | def _server_trace(self): 190 | if self._cached_server_trace is None: 191 | trace = self._sim_log_dir.name + "/trace_node_right.pcap" 192 | self._inject_keylog_if_possible(trace) 193 | self._cached_server_trace = TraceAnalyzer(trace, self._keylog_file()) 194 | return self._cached_server_trace 195 | 196 | # see https://www.stefanocappellini.it/generate-pseudorandom-bytes-with-python/ for benchmarks 197 | def _generate_random_file(self, size: int, filename_len=10) -> str: 198 | filename = random_string(filename_len) 199 | enc = AES.new(os.urandom(32), AES.MODE_OFB, b"a" * 16) 200 | f = open(self.www_dir() + filename, "wb") 201 | f.write(enc.encrypt(b" " * size)) 202 | f.close() 203 | logging.debug("Generated random file: %s of size: %d", filename, size) 204 | return filename 205 | 206 | def _retry_sent(self) -> bool: 207 | return len(self._client_trace().get_retry()) > 0 208 | 209 | def _check_version_and_files(self) -> bool: 210 | versions = [hex(int(v, 0)) for v in self._get_versions()] 211 | if len(versions) != 1: 212 | logging.info("Expected exactly one version. 
Got %s", versions) 213 | return False 214 | if QUIC_VERSION not in versions: 215 | logging.info("Wrong version. Expected %s, got %s", QUIC_VERSION, versions) 216 | return False 217 | return self._check_files() 218 | 219 | def _check_files(self) -> bool: 220 | if len(self._files) == 0: 221 | raise Exception("No test files generated.") 222 | files = [ 223 | n 224 | for n in os.listdir(self.download_dir()) 225 | if os.path.isfile(os.path.join(self.download_dir(), n)) 226 | ] 227 | too_many = [f for f in files if f not in self._files] 228 | if len(too_many) != 0: 229 | logging.info("Found unexpected downloaded files: %s", too_many) 230 | too_few = [f for f in self._files if f not in files] 231 | if len(too_few) != 0: 232 | logging.info("Missing files: %s", too_few) 233 | if len(too_many) != 0 or len(too_few) != 0: 234 | return False 235 | for f in self._files: 236 | fp = self.download_dir() + f 237 | if not os.path.isfile(fp): 238 | logging.info("File %s does not exist.", fp) 239 | return False 240 | try: 241 | size = os.path.getsize(self.www_dir() + f) 242 | downloaded_size = os.path.getsize(fp) 243 | if size != downloaded_size: 244 | logging.info( 245 | "File size of %s doesn't match. Original: %d bytes, downloaded: %d bytes.", 246 | fp, 247 | size, 248 | downloaded_size, 249 | ) 250 | return False 251 | if not filecmp.cmp(self.www_dir() + f, fp, shallow=False): 252 | logging.info("File contents of %s do not match.", fp) 253 | return False 254 | except Exception as exception: 255 | logging.info( 256 | "Could not compare files %s and %s: %s", 257 | self.www_dir() + f, 258 | fp, 259 | exception, 260 | ) 261 | return False 262 | logging.debug("Check of downloaded files succeeded.") 263 | return True 264 | 265 | def _count_handshakes(self) -> int: 266 | """Count the number of QUIC handshakes""" 267 | tr = self._server_trace() 268 | # Determine the number of handshakes by looking at Initial packets. 269 | # This is easier, since the SCID of Initial packets doesn't changes. 
270 | return len(set([p.scid for p in tr.get_initial(Direction.FROM_SERVER)])) 271 | 272 | def _get_versions(self) -> set: 273 | """Get the QUIC versions""" 274 | tr = self._server_trace() 275 | return set([p.version for p in tr.get_initial(Direction.FROM_SERVER)]) 276 | 277 | def _payload_size(self, packets: List) -> int: 278 | """Get the sum of the payload sizes of all packets""" 279 | size = 0 280 | for p in packets: 281 | if hasattr(p, "long_packet_type") or hasattr(p, "long_packet_type_v2"): 282 | if hasattr(p, "payload"): # when keys are available 283 | size += len(p.payload.split(":")) 284 | else: 285 | size += len(p.remaining_payload.split(":")) 286 | else: 287 | if hasattr(p, "protected_payload"): 288 | size += len(p.protected_payload.split(":")) 289 | return size 290 | 291 | def cleanup(self): 292 | if self._www_dir: 293 | self._www_dir.cleanup() 294 | self._www_dir = None 295 | if self._download_dir: 296 | self._download_dir.cleanup() 297 | self._download_dir = None 298 | 299 | @abc.abstractmethod 300 | def get_paths(self): 301 | pass 302 | 303 | @abc.abstractmethod 304 | def check(self) -> TestResult: 305 | self._client_trace() 306 | self._server_trace() 307 | pass 308 | 309 | 310 | class Measurement(TestCase): 311 | @abc.abstractmethod 312 | def result(self) -> float: 313 | pass 314 | 315 | @staticmethod 316 | @abc.abstractmethod 317 | def unit() -> str: 318 | pass 319 | 320 | @staticmethod 321 | @abc.abstractmethod 322 | def repetitions() -> int: 323 | pass 324 | 325 | 326 | class TestCaseVersionNegotiation(TestCase): 327 | @staticmethod 328 | def name(): 329 | return "versionnegotiation" 330 | 331 | @staticmethod 332 | def abbreviation(): 333 | return "V" 334 | 335 | @staticmethod 336 | def desc(): 337 | return "A version negotiation packet is elicited and acted on." 338 | 339 | def get_paths(self): 340 | return [""] 341 | 342 | def check(self) -> TestResult: 343 | super().check() 344 | tr = self._client_trace() 345 | initials = tr.get_initial(Direction.FROM_CLIENT) 346 | dcid = "" 347 | for p in initials: 348 | dcid = p.dcid 349 | break 350 | if dcid == "": 351 | logging.info("Didn't find an Initial / a DCID.") 352 | return TestResult.FAILED 353 | vnps = tr.get_vnp() 354 | for p in vnps: 355 | if p.scid == dcid: 356 | return TestResult.SUCCEEDED 357 | logging.info("Didn't find a Version Negotiation Packet with matching SCID.") 358 | return TestResult.FAILED 359 | 360 | 361 | class TestCaseHandshake(TestCase): 362 | @staticmethod 363 | def name(): 364 | return "handshake" 365 | 366 | @staticmethod 367 | def abbreviation(): 368 | return "H" 369 | 370 | @staticmethod 371 | def desc(): 372 | return "Handshake completes successfully." 373 | 374 | def get_paths(self): 375 | self._files = [self._generate_random_file(1 * KB)] 376 | return self._files 377 | 378 | def check(self) -> TestResult: 379 | super().check() 380 | if not self._check_version_and_files(): 381 | return TestResult.FAILED 382 | if self._retry_sent(): 383 | logging.info("Didn't expect a Retry to be sent.") 384 | return TestResult.FAILED 385 | num_handshakes = self._count_handshakes() 386 | if num_handshakes != 1: 387 | logging.info("Expected exactly 1 handshake. 
Got: %d", num_handshakes) 388 | return TestResult.FAILED 389 | return TestResult.SUCCEEDED 390 | 391 | 392 | class TestCaseLongRTT(TestCaseHandshake): 393 | @staticmethod 394 | def abbreviation(): 395 | return "LR" 396 | 397 | @staticmethod 398 | def name(): 399 | return "longrtt" 400 | 401 | @staticmethod 402 | def testname(p: Perspective): 403 | return "handshake" 404 | 405 | @staticmethod 406 | def desc(): 407 | return "Handshake completes when RTT is long." 408 | 409 | @staticmethod 410 | def scenario() -> str: 411 | """Scenario for the ns3 simulator""" 412 | return "simple-p2p --delay=750ms --bandwidth=10Mbps --queue=25" 413 | 414 | def check(self) -> TestResult: 415 | super().check() 416 | num_handshakes = self._count_handshakes() 417 | if num_handshakes != 1: 418 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 419 | return TestResult.FAILED 420 | if not self._check_version_and_files(): 421 | return TestResult.FAILED 422 | num_ch = 0 423 | for p in self._client_trace().get_initial(Direction.FROM_CLIENT): 424 | if hasattr(p, "tls_handshake_type"): 425 | if p.tls_handshake_type == "1": 426 | num_ch += 1 427 | # Retransmitted ClientHello does not have 428 | # tls_handshake_type attribute. See 429 | # https://gitlab.com/wireshark/wireshark/-/issues/18696 430 | # for details. 431 | elif hasattr(p, "retransmission") or hasattr(p, "overlap"): 432 | num_ch += 1 433 | if num_ch < 2: 434 | logging.info("Expected at least 2 ClientHellos. Got: %d", num_ch) 435 | return TestResult.FAILED 436 | return TestResult.SUCCEEDED 437 | 438 | 439 | class TestCaseTransfer(TestCase): 440 | @staticmethod 441 | def name(): 442 | return "transfer" 443 | 444 | @staticmethod 445 | def abbreviation(): 446 | return "DC" 447 | 448 | @staticmethod 449 | def desc(): 450 | return "Stream data is being sent and received correctly. Connection close completes with a zero error code." 451 | 452 | def get_paths(self): 453 | self._files = [ 454 | self._generate_random_file(2 * MB), 455 | self._generate_random_file(3 * MB), 456 | self._generate_random_file(5 * MB), 457 | ] 458 | return self._files 459 | 460 | def check(self) -> TestResult: 461 | super().check() 462 | num_handshakes = self._count_handshakes() 463 | if num_handshakes != 1: 464 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 465 | return TestResult.FAILED 466 | if not self._check_version_and_files(): 467 | return TestResult.FAILED 468 | return TestResult.SUCCEEDED 469 | 470 | 471 | class TestCaseChaCha20(TestCase): 472 | @staticmethod 473 | def name(): 474 | return "chacha20" 475 | 476 | @staticmethod 477 | def testname(p: Perspective): 478 | return "chacha20" 479 | 480 | @staticmethod 481 | def abbreviation(): 482 | return "C20" 483 | 484 | @staticmethod 485 | def desc(): 486 | return "Handshake completes using ChaCha20." 487 | 488 | def get_paths(self): 489 | self._files = [self._generate_random_file(3 * MB)] 490 | return self._files 491 | 492 | def check(self) -> TestResult: 493 | super().check() 494 | num_handshakes = self._count_handshakes() 495 | if num_handshakes != 1: 496 | logging.info("Expected exactly 1 handshake. 
Got: %d", num_handshakes) 497 | return TestResult.FAILED 498 | ciphersuites = [] 499 | for p in self._client_trace().get_initial(Direction.FROM_CLIENT): 500 | if hasattr(p, "tls_handshake_ciphersuite"): 501 | ciphersuites.append(p.tls_handshake_ciphersuite) 502 | if len(set(ciphersuites)) != 1 or ( 503 | ciphersuites[0] != "4867" and ciphersuites[0] != "0x1303" 504 | ): 505 | logging.info( 506 | "Expected only ChaCha20 cipher suite to be offered. Got: %s", 507 | set(ciphersuites), 508 | ) 509 | return TestResult.FAILED 510 | if not self._check_version_and_files(): 511 | return TestResult.FAILED 512 | return TestResult.SUCCEEDED 513 | 514 | 515 | class TestCaseMultiplexing(TestCase): 516 | @staticmethod 517 | def name(): 518 | return "multiplexing" 519 | 520 | @staticmethod 521 | def testname(p: Perspective): 522 | return "transfer" 523 | 524 | @staticmethod 525 | def abbreviation(): 526 | return "M" 527 | 528 | @staticmethod 529 | def desc(): 530 | return "Thousands of files are transferred over a single connection, and server increased stream limits to accomodate client requests." 531 | 532 | def get_paths(self): 533 | for _ in range(1, 2000): 534 | self._files.append(self._generate_random_file(32)) 535 | return self._files 536 | 537 | def check(self) -> TestResult: 538 | super().check() 539 | if not self._keylog_file(): 540 | logging.info("Can't check test result. SSLKEYLOG required.") 541 | return TestResult.UNSUPPORTED 542 | num_handshakes = self._count_handshakes() 543 | if num_handshakes != 1: 544 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 545 | return TestResult.FAILED 546 | if not self._check_version_and_files(): 547 | return TestResult.FAILED 548 | # Check that the server set a bidirectional stream limit <= 1000 549 | checked_stream_limit = False 550 | for p in self._client_trace().get_handshake(Direction.FROM_SERVER): 551 | if hasattr(p, "tls.quic.parameter.initial_max_streams_bidi"): 552 | checked_stream_limit = True 553 | stream_limit = int( 554 | getattr(p, "tls.quic.parameter.initial_max_streams_bidi") 555 | ) 556 | logging.debug("Server set bidirectional stream limit: %d", stream_limit) 557 | if stream_limit > 1000: 558 | logging.info("Server set a stream limit > 1000.") 559 | return TestResult.FAILED 560 | if not checked_stream_limit: 561 | logging.info("Couldn't check stream limit.") 562 | return TestResult.FAILED 563 | return TestResult.SUCCEEDED 564 | 565 | 566 | class TestCaseRetry(TestCase): 567 | @staticmethod 568 | def name(): 569 | return "retry" 570 | 571 | @staticmethod 572 | def abbreviation(): 573 | return "S" 574 | 575 | @staticmethod 576 | def desc(): 577 | return "Server sends a Retry, and a subsequent connection using the Retry token completes successfully." 
578 | 579 | def get_paths(self): 580 | self._files = [ 581 | self._generate_random_file(10 * KB), 582 | ] 583 | return self._files 584 | 585 | def _check_trace(self) -> bool: 586 | # check that (at least) one Retry packet was actually sent 587 | tr = self._client_trace() 588 | tokens = [] 589 | retries = tr.get_retry(Direction.FROM_SERVER) 590 | for p in retries: 591 | if not hasattr(p, "retry_token"): 592 | logging.info("Retry packet doesn't have a retry_token") 593 | logging.info(p) 594 | return False 595 | tokens += [p.retry_token.replace(":", "")] 596 | if len(tokens) == 0: 597 | logging.info("Didn't find any Retry packets.") 598 | return False 599 | 600 | # check that an Initial packet uses a token sent in the Retry packet(s) 601 | highest_pn_before_retry = -1 602 | for p in tr.get_initial(Direction.FROM_CLIENT): 603 | pn = int(p.packet_number) 604 | if p.token_length == "0": 605 | highest_pn_before_retry = max(highest_pn_before_retry, pn) 606 | continue 607 | if pn <= highest_pn_before_retry: 608 | logging.debug( 609 | "Client reset the packet number. Check failed for PN %d", pn 610 | ) 611 | return False 612 | token = p.token.replace(":", "") 613 | if token in tokens: 614 | logging.debug("Check of Retry succeeded. Token used: %s", token) 615 | return True 616 | logging.info("Didn't find any Initial packet using a Retry token.") 617 | return False 618 | 619 | def check(self) -> TestResult: 620 | super().check() 621 | num_handshakes = self._count_handshakes() 622 | if num_handshakes != 1: 623 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 624 | return TestResult.FAILED 625 | if not self._check_version_and_files(): 626 | return TestResult.FAILED 627 | if not self._check_trace(): 628 | return TestResult.FAILED 629 | return TestResult.SUCCEEDED 630 | 631 | 632 | class TestCaseResumption(TestCase): 633 | @staticmethod 634 | def name(): 635 | return "resumption" 636 | 637 | @staticmethod 638 | def abbreviation(): 639 | return "R" 640 | 641 | @staticmethod 642 | def desc(): 643 | return "Connection is established using TLS Session Resumption." 644 | 645 | def get_paths(self): 646 | self._files = [ 647 | self._generate_random_file(5 * KB), 648 | self._generate_random_file(10 * KB), 649 | ] 650 | return self._files 651 | 652 | def check(self) -> TestResult: 653 | super().check() 654 | if not self._keylog_file(): 655 | logging.info("Can't check test result. SSLKEYLOG required.") 656 | return TestResult.UNSUPPORTED 657 | num_handshakes = self._count_handshakes() 658 | if num_handshakes != 2: 659 | logging.info("Expected exactly 2 handshake. Got: %d", num_handshakes) 660 | return TestResult.FAILED 661 | 662 | handshake_packets = self._client_trace().get_handshake(Direction.FROM_SERVER) 663 | cids = [p.scid for p in handshake_packets] 664 | first_handshake_has_cert = False 665 | for p in handshake_packets: 666 | if p.scid == cids[0]: 667 | if hasattr(p, "tls_handshake_certificates_length"): 668 | first_handshake_has_cert = True 669 | elif p.scid == cids[len(cids) - 1]: # second handshake 670 | if hasattr(p, "tls_handshake_certificates_length"): 671 | logging.info( 672 | "Server sent a Certificate message in the second handshake." 673 | ) 674 | return TestResult.FAILED 675 | else: 676 | logging.info( 677 | "Found handshake packet that neither belongs to the first nor the second handshake." 678 | ) 679 | return TestResult.FAILED 680 | if not first_handshake_has_cert: 681 | logging.info( 682 | "Didn't find a Certificate message in the first handshake. That's weird." 
683 | ) 684 | return TestResult.FAILED 685 | if not self._check_version_and_files(): 686 | return TestResult.FAILED 687 | return TestResult.SUCCEEDED 688 | 689 | 690 | class TestCaseZeroRTT(TestCase): 691 | NUM_FILES = 40 692 | FILESIZE = 32 # in bytes 693 | FILENAMELEN = 250 694 | 695 | @staticmethod 696 | def name(): 697 | return "zerortt" 698 | 699 | @staticmethod 700 | def abbreviation(): 701 | return "Z" 702 | 703 | @staticmethod 704 | def desc(): 705 | return "0-RTT data is being sent and acted on." 706 | 707 | def get_paths(self): 708 | for _ in range(self.NUM_FILES): 709 | self._files.append( 710 | self._generate_random_file(self.FILESIZE, self.FILENAMELEN) 711 | ) 712 | return self._files 713 | 714 | def check(self) -> TestResult: 715 | super().check() 716 | num_handshakes = self._count_handshakes() 717 | if num_handshakes != 2: 718 | logging.info("Expected exactly 2 handshakes. Got: %d", num_handshakes) 719 | return TestResult.FAILED 720 | if not self._check_version_and_files(): 721 | return TestResult.FAILED 722 | tr = self._client_trace() 723 | zeroRTTSize = self._payload_size(tr.get_0rtt()) 724 | oneRTTSize = self._payload_size(tr.get_1rtt(Direction.FROM_CLIENT)) 725 | logging.debug("0-RTT size: %d", zeroRTTSize) 726 | logging.debug("1-RTT size: %d", oneRTTSize) 727 | if zeroRTTSize == 0: 728 | logging.info("Client didn't send any 0-RTT data.") 729 | return TestResult.FAILED 730 | if oneRTTSize > 0.5 * self.FILENAMELEN * self.NUM_FILES: 731 | logging.info("Client sent too much data in 1-RTT packets.") 732 | return TestResult.FAILED 733 | return TestResult.SUCCEEDED 734 | 735 | 736 | class TestCaseHTTP3(TestCase): 737 | @staticmethod 738 | def name(): 739 | return "http3" 740 | 741 | @staticmethod 742 | def abbreviation(): 743 | return "3" 744 | 745 | @staticmethod 746 | def desc(): 747 | return "An H3 transaction succeeded." 748 | 749 | def get_paths(self): 750 | self._files = [ 751 | self._generate_random_file(5 * KB), 752 | self._generate_random_file(10 * KB), 753 | self._generate_random_file(500 * KB), 754 | ] 755 | return self._files 756 | 757 | def check(self) -> TestResult: 758 | super().check() 759 | num_handshakes = self._count_handshakes() 760 | if num_handshakes != 1: 761 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 762 | return TestResult.FAILED 763 | if not self._check_version_and_files(): 764 | return TestResult.FAILED 765 | return TestResult.SUCCEEDED 766 | 767 | 768 | class TestCaseAmplificationLimit(TestCase): 769 | @staticmethod 770 | def name(): 771 | return "amplificationlimit" 772 | 773 | @staticmethod 774 | def testname(p: Perspective): 775 | return "transfer" 776 | 777 | @staticmethod 778 | def abbreviation(): 779 | return "A" 780 | 781 | @staticmethod 782 | def desc(): 783 | return "The server obeys the 3x amplification limit." 784 | 785 | def certs_dir(self): 786 | if not self._cert_dir: 787 | self._cert_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="certs_") 788 | generate_cert_chain(self._cert_dir.name, 9) 789 | return self._cert_dir.name + "/" 790 | 791 | @staticmethod 792 | def scenario() -> str: 793 | """Scenario for the ns3 simulator""" 794 | # Let the ClientHello pass, but drop a bunch of retransmissions afterwards. 
795 | return "droplist --delay=15ms --bandwidth=10Mbps --queue=25 --drops_to_server=2,3,4,5,6,7" 796 | 797 | def get_paths(self): 798 | self._files = [self._generate_random_file(5 * KB)] 799 | return self._files 800 | 801 | def check(self) -> TestResult: 802 | super().check() 803 | if not self._keylog_file(): 804 | logging.info("Can't check test result. SSLKEYLOG required.") 805 | return TestResult.UNSUPPORTED 806 | num_handshakes = self._count_handshakes() 807 | if num_handshakes != 1: 808 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 809 | return TestResult.FAILED 810 | if not self._check_version_and_files(): 811 | return TestResult.FAILED 812 | # Check the highest offset of CRYPTO frames sent by the server. 813 | # This way we can make sure that it actually used the provided cert chain. 814 | max_handshake_offset = 0 815 | for p in self._server_trace().get_handshake(Direction.FROM_SERVER): 816 | if hasattr(p, "crypto_offset"): 817 | max_handshake_offset = max( 818 | max_handshake_offset, int(p.crypto_offset) + int(p.crypto_length) 819 | ) 820 | if max_handshake_offset < 7500: 821 | logging.info( 822 | "Server sent too little Handshake CRYPTO data (%d bytes). Not using the provided cert chain?", 823 | max_handshake_offset, 824 | ) 825 | return TestResult.FAILED 826 | logging.debug( 827 | "Server sent %d bytes in Handshake CRYPTO frames.", max_handshake_offset 828 | ) 829 | 830 | # Check that the server didn't send more than 3-4x what the client sent. 831 | allowed = 0 832 | allowed_with_tolerance = 0 833 | client_sent, server_sent = 0, 0 # only for debug messages 834 | res = TestResult.FAILED 835 | log_output = [] 836 | for p in self._server_trace().get_raw_packets(): 837 | direction = get_direction(p) 838 | packet_type = get_packet_type(p) 839 | if packet_type == PacketType.VERSIONNEGOTIATION: 840 | logging.info("Didn't expect a Version Negotiation packet.") 841 | return TestResult.FAILED 842 | packet_size = int(p.udp.length) - 8 # subtract the UDP header length 843 | if packet_type == PacketType.INVALID: 844 | logging.debug("Couldn't determine packet type.") 845 | return TestResult.FAILED 846 | if direction == Direction.FROM_CLIENT: 847 | if packet_type is PacketType.HANDSHAKE: 848 | res = TestResult.SUCCEEDED 849 | break 850 | if packet_type is PacketType.INITIAL: 851 | client_sent += packet_size 852 | allowed += 3 * packet_size 853 | allowed_with_tolerance += 4 * packet_size 854 | log_output.append( 855 | "Received a {} byte Initial packet from the client. Amplification limit: {}".format( 856 | packet_size, 3 * client_sent 857 | ) 858 | ) 859 | elif direction == Direction.FROM_SERVER: 860 | server_sent += packet_size 861 | log_output.append( 862 | "Received a {} byte Handshake packet from the server. Total: {}".format( 863 | packet_size, server_sent 864 | ) 865 | ) 866 | if packet_size >= allowed_with_tolerance: 867 | log_output.append("Server violated the amplification limit.") 868 | break 869 | if packet_size > allowed: 870 | log_output.append( 871 | "Server violated the amplification limit, but stayed within 3-4x amplification. Letting it slide." 
872 | ) 873 | allowed_with_tolerance -= packet_size 874 | allowed -= packet_size 875 | else: 876 | logging.debug("Couldn't determine sender of packet.") 877 | return TestResult.FAILED 878 | 879 | log_level = logging.DEBUG 880 | if res == TestResult.FAILED: 881 | log_level = logging.INFO 882 | for msg in log_output: 883 | logging.log(log_level, msg) 884 | return res 885 | 886 | 887 | class TestCaseBlackhole(TestCase): 888 | @staticmethod 889 | def name(): 890 | return "blackhole" 891 | 892 | @staticmethod 893 | def testname(p: Perspective): 894 | return "transfer" 895 | 896 | @staticmethod 897 | def abbreviation(): 898 | return "B" 899 | 900 | @staticmethod 901 | def desc(): 902 | return "Transfer succeeds despite underlying network blacking out for a few seconds." 903 | 904 | @staticmethod 905 | def scenario() -> str: 906 | """Scenario for the ns3 simulator""" 907 | return "blackhole --delay=15ms --bandwidth=10Mbps --queue=25 --on=5s --off=2s" 908 | 909 | def get_paths(self): 910 | self._files = [self._generate_random_file(10 * MB)] 911 | return self._files 912 | 913 | def check(self) -> TestResult: 914 | super().check() 915 | num_handshakes = self._count_handshakes() 916 | if num_handshakes != 1: 917 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 918 | return TestResult.FAILED 919 | if not self._check_version_and_files(): 920 | return TestResult.FAILED 921 | return TestResult.SUCCEEDED 922 | 923 | 924 | class TestCaseKeyUpdate(TestCaseHandshake): 925 | @staticmethod 926 | def name(): 927 | return "keyupdate" 928 | 929 | @staticmethod 930 | def testname(p: Perspective): 931 | if p is Perspective.CLIENT: 932 | return "keyupdate" 933 | return "transfer" 934 | 935 | @staticmethod 936 | def abbreviation(): 937 | return "U" 938 | 939 | @staticmethod 940 | def desc(): 941 | return "One of the two endpoints updates keys and the peer responds correctly." 942 | 943 | def get_paths(self): 944 | self._files = [self._generate_random_file(3 * MB)] 945 | return self._files 946 | 947 | def check(self) -> TestResult: 948 | super().check() 949 | if not self._keylog_file(): 950 | logging.info("Can't check test result. SSLKEYLOG required.") 951 | return TestResult.UNSUPPORTED 952 | 953 | num_handshakes = self._count_handshakes() 954 | if num_handshakes != 1: 955 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 956 | return TestResult.FAILED 957 | if not self._check_version_and_files(): 958 | return TestResult.FAILED 959 | 960 | client = {0: 0, 1: 0} 961 | server = {0: 0, 1: 0} 962 | try: 963 | 964 | def _get_key_phase(pkt) -> int: 965 | kp: str = pkt.key_phase.raw_value 966 | # when key_phase bit is set in a QUIC packet, certain versions 967 | # of wireshark (4.0.11, for example) have been seen to return the string value 968 | # "1" and certain other versions of wireshark return the string value "True". 969 | # here we deal with such values and return the integer value 1 for either of those. 970 | return 1 if kp in ["1", "True"] else 0 971 | 972 | for p in self._client_trace().get_1rtt(Direction.FROM_CLIENT): 973 | key_phase = _get_key_phase(p) 974 | client[key_phase] += 1 975 | for p in self._server_trace().get_1rtt(Direction.FROM_SERVER): 976 | key_phase = _get_key_phase(p) 977 | server[key_phase] += 1 978 | except Exception: 979 | logging.info( 980 | "Failed to read key phase bits. Potentially incorrect SSLKEYLOG?" 
981 | ) 982 | return TestResult.FAILED 983 | 984 | succeeded = client[1] * server[1] > 0 985 | 986 | log_level = logging.INFO 987 | if succeeded: 988 | log_level = logging.DEBUG 989 | 990 | logging.log( 991 | log_level, 992 | "Client sent %d key phase 0 and %d key phase 1 packets.", 993 | client[0], 994 | client[1], 995 | ) 996 | logging.log( 997 | log_level, 998 | "Server sent %d key phase 0 and %d key phase 1 packets.", 999 | server[0], 1000 | server[1], 1001 | ) 1002 | if not succeeded: 1003 | logging.info( 1004 | "Expected to see packets sent with key phase 1 from both client and server." 1005 | ) 1006 | return TestResult.FAILED 1007 | return TestResult.SUCCEEDED 1008 | 1009 | 1010 | class TestCaseHandshakeLoss(TestCase): 1011 | _num_runs = 50 1012 | 1013 | @staticmethod 1014 | def name(): 1015 | return "handshakeloss" 1016 | 1017 | @staticmethod 1018 | def testname(p: Perspective): 1019 | return "multiconnect" 1020 | 1021 | @staticmethod 1022 | def abbreviation(): 1023 | return "L1" 1024 | 1025 | @staticmethod 1026 | def desc(): 1027 | return "Handshake completes under extreme packet loss." 1028 | 1029 | @staticmethod 1030 | def timeout() -> int: 1031 | return 300 1032 | 1033 | @staticmethod 1034 | def scenario() -> str: 1035 | """Scenario for the ns3 simulator""" 1036 | return "drop-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=30 --rate_to_client=30 --burst_to_server=3 --burst_to_client=3" 1037 | 1038 | def get_paths(self): 1039 | for _ in range(self._num_runs): 1040 | self._files.append(self._generate_random_file(1 * KB)) 1041 | return self._files 1042 | 1043 | def check(self) -> TestResult: 1044 | super().check() 1045 | num_handshakes = self._count_handshakes() 1046 | if num_handshakes != self._num_runs: 1047 | logging.info( 1048 | "Expected %d handshakes. Got: %d", self._num_runs, num_handshakes 1049 | ) 1050 | return TestResult.FAILED 1051 | if not self._check_version_and_files(): 1052 | return TestResult.FAILED 1053 | return TestResult.SUCCEEDED 1054 | 1055 | 1056 | class TestCaseTransferLoss(TestCase): 1057 | @staticmethod 1058 | def name(): 1059 | return "transferloss" 1060 | 1061 | @staticmethod 1062 | def testname(p: Perspective): 1063 | return "transfer" 1064 | 1065 | @staticmethod 1066 | def abbreviation(): 1067 | return "L2" 1068 | 1069 | @staticmethod 1070 | def desc(): 1071 | return "Transfer completes under moderate packet loss." 1072 | 1073 | @staticmethod 1074 | def scenario() -> str: 1075 | """Scenario for the ns3 simulator""" 1076 | return "drop-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=2 --rate_to_client=2 --burst_to_server=3 --burst_to_client=3" 1077 | 1078 | def get_paths(self): 1079 | # At a packet loss rate of 2% and a MTU of 1500 bytes, we can expect 27 dropped packets. 1080 | self._files = [self._generate_random_file(2 * MB)] 1081 | return self._files 1082 | 1083 | def check(self) -> TestResult: 1084 | super().check() 1085 | num_handshakes = self._count_handshakes() 1086 | if num_handshakes != 1: 1087 | logging.info("Expected exactly 1 handshake. 
Got: %d", num_handshakes) 1088 | return TestResult.FAILED 1089 | if not self._check_version_and_files(): 1090 | return TestResult.FAILED 1091 | return TestResult.SUCCEEDED 1092 | 1093 | 1094 | class TestCaseHandshakeCorruption(TestCaseHandshakeLoss): 1095 | @staticmethod 1096 | def name(): 1097 | return "handshakecorruption" 1098 | 1099 | @staticmethod 1100 | def abbreviation(): 1101 | return "C1" 1102 | 1103 | @staticmethod 1104 | def desc(): 1105 | return "Handshake completes under extreme packet corruption." 1106 | 1107 | @staticmethod 1108 | def scenario() -> str: 1109 | """Scenario for the ns3 simulator""" 1110 | return "corrupt-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=30 --rate_to_client=30 --burst_to_server=3 --burst_to_client=3" 1111 | 1112 | 1113 | class TestCaseTransferCorruption(TestCaseTransferLoss): 1114 | @staticmethod 1115 | def name(): 1116 | return "transfercorruption" 1117 | 1118 | @staticmethod 1119 | def abbreviation(): 1120 | return "C2" 1121 | 1122 | @staticmethod 1123 | def desc(): 1124 | return "Transfer completes under moderate packet corruption." 1125 | 1126 | @staticmethod 1127 | def scenario() -> str: 1128 | """Scenario for the ns3 simulator""" 1129 | return "corrupt-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=2 --rate_to_client=2 --burst_to_server=3 --burst_to_client=3" 1130 | 1131 | 1132 | class TestCaseECN(TestCaseHandshake): 1133 | @staticmethod 1134 | def name(): 1135 | return "ecn" 1136 | 1137 | @staticmethod 1138 | def abbreviation(): 1139 | return "E" 1140 | 1141 | def _count_ecn(self, tr): 1142 | ecn = [0] * (max(ECN) + 1) 1143 | for p in tr: 1144 | e = int(getattr(p["ip"], "dsfield.ecn")) 1145 | ecn[e] += 1 1146 | for e in ECN: 1147 | logging.debug("%s %d", e, ecn[e]) 1148 | return ecn 1149 | 1150 | def _check_ecn_any(self, e) -> bool: 1151 | return e[ECN.ECT0] != 0 or e[ECN.ECT1] != 0 1152 | 1153 | def _check_ecn_marks(self, e) -> bool: 1154 | return e[ECN.CE] == 0 and self._check_ecn_any(e) 1155 | 1156 | def _check_ack_ecn(self, tr) -> bool: 1157 | # NOTE: We only check whether the trace contains any ACK-ECN information, not whether it is valid 1158 | for p in tr: 1159 | if hasattr(p["quic"], "ack.ect0_count"): 1160 | return True 1161 | return False 1162 | 1163 | def check(self) -> TestResult: 1164 | super().check() 1165 | if not self._keylog_file(): 1166 | logging.info("Can't check test result. 
SSLKEYLOG required.") 1167 | return TestResult.UNSUPPORTED 1168 | 1169 | result = super(TestCaseECN, self).check() 1170 | if result != TestResult.SUCCEEDED: 1171 | return result 1172 | 1173 | tr_client = self._client_trace()._get_packets( 1174 | self._client_trace()._get_direction_filter(Direction.FROM_CLIENT) + " quic" 1175 | ) 1176 | ecn = self._count_ecn(tr_client) 1177 | ecn_client_any_marked = self._check_ecn_any(ecn) 1178 | ecn_client_all_ok = self._check_ecn_marks(ecn) 1179 | ack_ecn_client_ok = self._check_ack_ecn(tr_client) 1180 | 1181 | tr_server = self._server_trace()._get_packets( 1182 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) + " quic" 1183 | ) 1184 | ecn = self._count_ecn(tr_server) 1185 | ecn_server_any_marked = self._check_ecn_any(ecn) 1186 | ecn_server_all_ok = self._check_ecn_marks(ecn) 1187 | ack_ecn_server_ok = self._check_ack_ecn(tr_server) 1188 | 1189 | if ecn_client_any_marked is False: 1190 | logging.info("Client did not mark any packets ECT(0) or ECT(1)") 1191 | else: 1192 | if ack_ecn_server_ok is False: 1193 | logging.info("Server did not send any ACK-ECN frames") 1194 | elif ecn_client_all_ok is False: 1195 | logging.info( 1196 | "Not all client packets were consistently marked with ECT(0) or ECT(1)" 1197 | ) 1198 | 1199 | if ecn_server_any_marked is False: 1200 | logging.info("Server did not mark any packets ECT(0) or ECT(1)") 1201 | else: 1202 | if ack_ecn_client_ok is False: 1203 | logging.info("Client did not send any ACK-ECN frames") 1204 | elif ecn_server_all_ok is False: 1205 | logging.info( 1206 | "Not all server packets were consistently marked with ECT(0) or ECT(1)" 1207 | ) 1208 | 1209 | if ( 1210 | ecn_client_all_ok 1211 | and ecn_server_all_ok 1212 | and ack_ecn_client_ok 1213 | and ack_ecn_server_ok 1214 | ): 1215 | return TestResult.SUCCEEDED 1216 | return TestResult.FAILED 1217 | 1218 | 1219 | class TestCasePortRebinding(TestCaseTransfer): 1220 | @staticmethod 1221 | def name(): 1222 | return "rebind-port" 1223 | 1224 | @staticmethod 1225 | def abbreviation(): 1226 | return "BP" 1227 | 1228 | @staticmethod 1229 | def testname(p: Perspective): 1230 | return "transfer" 1231 | 1232 | @staticmethod 1233 | def desc(): 1234 | return "Transfer completes under frequent port rebindings on the client side." 1235 | 1236 | def get_paths(self): 1237 | self._files = [ 1238 | self._generate_random_file(10 * MB), 1239 | ] 1240 | return self._files 1241 | 1242 | @staticmethod 1243 | def scenario() -> str: 1244 | """Scenario for the ns3 simulator""" 1245 | return "rebind --delay=15ms --bandwidth=10Mbps --queue=25 --first-rebind=1s --rebind-freq=5s" 1246 | 1247 | @staticmethod 1248 | def _addr(p: List, which: str) -> str: 1249 | return ( 1250 | getattr(p["ipv6"], which) 1251 | if "IPV6" in str(p.layers) 1252 | else getattr(p["ip"], which) 1253 | ) 1254 | 1255 | @staticmethod 1256 | def _path(p: List) -> Tuple[str, int, str, int]: 1257 | return ( 1258 | (TestCasePortRebinding._addr(p, "src"), int(getattr(p["udp"], "srcport"))), 1259 | (TestCasePortRebinding._addr(p, "dst"), int(getattr(p["udp"], "dstport"))), 1260 | ) 1261 | 1262 | def check(self) -> TestResult: 1263 | super().check() 1264 | if not self._keylog_file(): 1265 | logging.info("Can't check test result. 
SSLKEYLOG required.") 1266 | return TestResult.UNSUPPORTED 1267 | 1268 | result = super(TestCasePortRebinding, self).check() 1269 | if result != TestResult.SUCCEEDED: 1270 | return result 1271 | 1272 | tr_server = self._server_trace()._get_packets( 1273 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) + " quic" 1274 | ) 1275 | 1276 | cur = None 1277 | last = None 1278 | paths = set() 1279 | challenges = set() 1280 | for p in tr_server: 1281 | cur = self._path(p) 1282 | if last is None: 1283 | last = cur 1284 | continue 1285 | 1286 | if last != cur and cur not in paths: 1287 | paths.add(last) 1288 | last = cur 1289 | # Packet on new path, should have a PATH_CHALLENGE frame 1290 | if hasattr(p["quic"], "path_challenge.data") is False: 1291 | logging.info( 1292 | "First server packet on new path %s did not contain a PATH_CHALLENGE frame", 1293 | cur, 1294 | ) 1295 | logging.info(p["quic"]) 1296 | return TestResult.FAILED 1297 | else: 1298 | challenges.add(getattr(p["quic"], "path_challenge.data")) 1299 | paths.add(cur) 1300 | 1301 | logging.info("Server saw these paths used: %s", paths) 1302 | if len(paths) <= 1: 1303 | logging.info("Server saw only a single path in use; test broken?") 1304 | return TestResult.FAILED 1305 | 1306 | tr_client = self._client_trace()._get_packets( 1307 | self._client_trace()._get_direction_filter(Direction.FROM_CLIENT) + " quic" 1308 | ) 1309 | 1310 | responses = list( 1311 | set( 1312 | getattr(p["quic"], "path_response.data") 1313 | for p in tr_client 1314 | if hasattr(p["quic"], "path_response.data") 1315 | ) 1316 | ) 1317 | 1318 | unresponded = [c for c in challenges if c not in responses] 1319 | if unresponded != []: 1320 | logging.info("PATH_CHALLENGE without a PATH_RESPONSE: %s", unresponded) 1321 | return TestResult.FAILED 1322 | 1323 | return TestResult.SUCCEEDED 1324 | 1325 | 1326 | class TestCaseAddressRebinding(TestCasePortRebinding): 1327 | @staticmethod 1328 | def name(): 1329 | return "rebind-addr" 1330 | 1331 | @staticmethod 1332 | def abbreviation(): 1333 | return "BA" 1334 | 1335 | @staticmethod 1336 | def testname(p: Perspective): 1337 | return "transfer" 1338 | 1339 | @staticmethod 1340 | def desc(): 1341 | return "Transfer completes under frequent IP address and port rebindings on the client side." 1342 | 1343 | @staticmethod 1344 | def scenario() -> str: 1345 | """Scenario for the ns3 simulator""" 1346 | return ( 1347 | super(TestCaseAddressRebinding, TestCaseAddressRebinding).scenario() 1348 | + " --rebind-addr" 1349 | ) 1350 | 1351 | def check(self) -> TestResult: 1352 | super().check() 1353 | if not self._keylog_file(): 1354 | logging.info("Can't check test result. SSLKEYLOG required.") 1355 | return TestResult.UNSUPPORTED 1356 | 1357 | tr_server = self._server_trace()._get_packets( 1358 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) + " quic" 1359 | ) 1360 | 1361 | ips = set() 1362 | for p in tr_server: 1363 | ip_vers = "ip" 1364 | if "IPV6" in str(p.layers): 1365 | ip_vers = "ipv6" 1366 | ips.add(getattr(p[ip_vers], "dst")) 1367 | 1368 | logging.info("Server saw these client addresses: %s", ips) 1369 | if len(ips) <= 1: 1370 | logging.info( 1371 | "Server saw only a single client IP address in use; test broken?" 
1372 | ) 1373 | return TestResult.FAILED 1374 | 1375 | result = super(TestCaseAddressRebinding, self).check() 1376 | if result != TestResult.SUCCEEDED: 1377 | return result 1378 | 1379 | return TestResult.SUCCEEDED 1380 | 1381 | 1382 | class TestCaseIPv6(TestCaseTransfer): 1383 | @staticmethod 1384 | def name(): 1385 | return "ipv6" 1386 | 1387 | @staticmethod 1388 | def abbreviation(): 1389 | return "6" 1390 | 1391 | @staticmethod 1392 | def testname(p: Perspective): 1393 | return "transfer" 1394 | 1395 | @staticmethod 1396 | def urlprefix() -> str: 1397 | return "https://server6:443/" 1398 | 1399 | @staticmethod 1400 | def desc(): 1401 | return "A transfer across an IPv6-only network succeeded." 1402 | 1403 | def get_paths(self): 1404 | self._files = [ 1405 | self._generate_random_file(5 * KB), 1406 | self._generate_random_file(10 * KB), 1407 | ] 1408 | return self._files 1409 | 1410 | def check(self) -> TestResult: 1411 | super().check() 1412 | result = super(TestCaseIPv6, self).check() 1413 | if result != TestResult.SUCCEEDED: 1414 | return result 1415 | 1416 | tr_server = self._server_trace()._get_packets( 1417 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) 1418 | + " quic && ip" 1419 | ) 1420 | 1421 | if tr_server: 1422 | logging.info("Packet trace contains %s IPv4 packets.", len(tr_server)) 1423 | return TestResult.FAILED 1424 | return TestResult.SUCCEEDED 1425 | 1426 | 1427 | class TestCaseConnectionMigration(TestCasePortRebinding): 1428 | @staticmethod 1429 | def name(): 1430 | return "connectionmigration" 1431 | 1432 | @staticmethod 1433 | def abbreviation(): 1434 | return "CM" 1435 | 1436 | @staticmethod 1437 | def testname(p: Perspective): 1438 | if p is Perspective.SERVER: 1439 | # Server needs to send preferred addresses 1440 | return "connectionmigration" 1441 | return "transfer" 1442 | 1443 | @staticmethod 1444 | def desc(): 1445 | return "A transfer succeeded during which the client performed an active migration." 
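The migration check below reuses TestCasePortRebinding._path(), which reduces each packet to ((src_addr, src_port), (dst_addr, dst_port)). A hypothetical illustration of how a client rebind appears in those terms — the addresses are IP4_CLIENT and IP4_SERVER from trace.py, the ports are made up:

# Hypothetical paths in the form returned by TestCasePortRebinding._path().
path_before = (("193.167.0.100", 4433), ("193.167.100.100", 443))
path_after = (("193.167.0.100", 5544), ("193.167.100.100", 443))  # new client source port
assert path_before != path_after  # a rebound source port shows up as a new path tuple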
1446 | 1447 | @staticmethod 1448 | def scenario() -> str: 1449 | return super(TestCaseTransfer, TestCaseTransfer).scenario() 1450 | 1451 | @staticmethod 1452 | def urlprefix() -> str: 1453 | """URL prefix""" 1454 | return "https://server46:443/" 1455 | 1456 | def get_paths(self): 1457 | self._files = [ 1458 | self._generate_random_file(2 * MB), 1459 | ] 1460 | return self._files 1461 | 1462 | def check(self) -> TestResult: 1463 | super().check() 1464 | # The parent check() method ensures that the client changed addresses 1465 | # and that PATH_CHALLENGE/RESPONSE frames were sent and received 1466 | result = super(TestCaseConnectionMigration, self).check() 1467 | if result != TestResult.SUCCEEDED: 1468 | return result 1469 | 1470 | tr_client = self._client_trace()._get_packets( 1471 | self._client_trace()._get_direction_filter(Direction.FROM_CLIENT) + " quic" 1472 | ) 1473 | 1474 | last = None 1475 | paths = set() 1476 | dcid = None 1477 | for p in tr_client: 1478 | cur = self._path(p) 1479 | if last is None: 1480 | last = cur 1481 | dcid = getattr(p["quic"], "dcid") 1482 | continue 1483 | 1484 | if last != cur and cur not in paths: 1485 | paths.add(last) 1486 | last = cur 1487 | # packet to different IP/port, should have a new DCID 1488 | if dcid == getattr(p["quic"], "dcid"): 1489 | logging.info( 1490 | "First client packet during active migration to %s used previous DCID %s", 1491 | cur, 1492 | dcid, 1493 | ) 1494 | logging.info(p["quic"]) 1495 | return TestResult.FAILED 1496 | dcid = getattr(p["quic"], "dcid") 1497 | logging.info( 1498 | "DCID changed to %s during active migration to %s", dcid, cur 1499 | ) 1500 | 1501 | return TestResult.SUCCEEDED 1502 | 1503 | 1504 | class TestCaseV2(TestCase): 1505 | @staticmethod 1506 | def name(): 1507 | return "v2" 1508 | 1509 | @staticmethod 1510 | def abbreviation(): 1511 | return "V2" 1512 | 1513 | @staticmethod 1514 | def desc(): 1515 | return "Server should select QUIC v2 in compatible version negotiation." 1516 | 1517 | def get_paths(self): 1518 | self._files = [self._generate_random_file(1 * KB)] 1519 | return self._files 1520 | 1521 | def check(self) -> TestResult: 1522 | super().check() 1523 | # Client should initially send QUIC v1 packet. It may send 1524 | # QUIC v2 packet. 1525 | versions = self._get_packet_versions( 1526 | self._client_trace().get_initial(Direction.FROM_CLIENT) 1527 | ) 1528 | if QUIC_VERSION not in versions: 1529 | logging.info( 1530 | "Wrong version in client Initial. Expected %s, got %s", 1531 | QUIC_VERSION, 1532 | versions, 1533 | ) 1534 | return TestResult.FAILED 1535 | 1536 | # Server Initial packets should have QUIC v2. It may send 1537 | # QUIC v1 packet before sending CRYPTO frame. 1538 | versions = self._get_packet_versions( 1539 | self._server_trace().get_initial(Direction.FROM_SERVER) 1540 | ) 1541 | if QUIC_V2 not in versions: 1542 | logging.info( 1543 | "Wrong version in server Initial. Expected %s, got %s", 1544 | QUIC_V2, 1545 | versions, 1546 | ) 1547 | return TestResult.FAILED 1548 | 1549 | # Client should use QUIC v2 for all Handshake packets. 1550 | versions = self._get_packet_versions( 1551 | self._client_trace().get_handshake(Direction.FROM_CLIENT) 1552 | ) 1553 | if len(versions) != 1: 1554 | logging.info( 1555 | "Expected exactly one version in client Handshake. Got %s", versions 1556 | ) 1557 | return TestResult.FAILED 1558 | if QUIC_V2 not in versions: 1559 | logging.info( 1560 | "Wrong version in client Handshake. 
Expected %s, got %s", 1561 | QUIC_V2, 1562 | versions, 1563 | ) 1564 | return TestResult.FAILED 1565 | 1566 | # Server should use QUIC v2 for all Handshake packets. 1567 | versions = self._get_packet_versions( 1568 | self._server_trace().get_handshake(Direction.FROM_SERVER) 1569 | ) 1570 | if len(versions) != 1: 1571 | logging.info( 1572 | "Expected exactly one version in server Handshake. Got %s", versions 1573 | ) 1574 | return TestResult.FAILED 1575 | if QUIC_V2 not in versions: 1576 | logging.info( 1577 | "Wrong version in server Handshake. Expected %s, got %s", 1578 | QUIC_V2, 1579 | versions, 1580 | ) 1581 | return TestResult.FAILED 1582 | 1583 | if not self._check_files(): 1584 | return TestResult.FAILED 1585 | 1586 | return TestResult.SUCCEEDED 1587 | 1588 | def _get_packet_versions(self, packets: List) -> set: 1589 | """Get a set of QUIC versions from packets.""" 1590 | return set([hex(int(p.version, 0)) for p in packets]) 1591 | 1592 | 1593 | class MeasurementGoodput(Measurement): 1594 | FILESIZE = 10 * MB 1595 | _result = 0.0 1596 | 1597 | @staticmethod 1598 | def name(): 1599 | return "goodput" 1600 | 1601 | @staticmethod 1602 | def unit() -> str: 1603 | return "kbps" 1604 | 1605 | @staticmethod 1606 | def testname(p: Perspective): 1607 | return "transfer" 1608 | 1609 | @staticmethod 1610 | def abbreviation(): 1611 | return "G" 1612 | 1613 | @staticmethod 1614 | def desc(): 1615 | return "Measures connection goodput over a 10Mbps link." 1616 | 1617 | @staticmethod 1618 | def repetitions() -> int: 1619 | return 5 1620 | 1621 | def get_paths(self): 1622 | self._files = [self._generate_random_file(self.FILESIZE)] 1623 | return self._files 1624 | 1625 | def check(self) -> TestResult: 1626 | super().check() 1627 | num_handshakes = self._count_handshakes() 1628 | if num_handshakes != 1: 1629 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 1630 | return TestResult.FAILED 1631 | if not self._check_version_and_files(): 1632 | return TestResult.FAILED 1633 | 1634 | packets, first, last = self._client_trace().get_1rtt_sniff_times( 1635 | Direction.FROM_SERVER 1636 | ) 1637 | 1638 | if last - first == 0: 1639 | return TestResult.FAILED 1640 | time = (last - first) / timedelta(milliseconds=1) 1641 | goodput = (8 * self.FILESIZE) / time 1642 | logging.debug( 1643 | "Transfering %d MB took %d ms. Goodput: %d kbps", 1644 | self.FILESIZE / MB, 1645 | time, 1646 | goodput, 1647 | ) 1648 | self._result = goodput 1649 | return TestResult.SUCCEEDED 1650 | 1651 | def result(self) -> float: 1652 | return self._result 1653 | 1654 | 1655 | class MeasurementCrossTraffic(MeasurementGoodput): 1656 | FILESIZE = 25 * MB 1657 | 1658 | @staticmethod 1659 | def name(): 1660 | return "crosstraffic" 1661 | 1662 | @staticmethod 1663 | def abbreviation(): 1664 | return "C" 1665 | 1666 | @staticmethod 1667 | def desc(): 1668 | return "Measures goodput over a 10Mbps link when competing with a TCP (cubic) connection." 
1669 | 1670 | @staticmethod 1671 | def timeout() -> int: 1672 | return 180 1673 | 1674 | @staticmethod 1675 | def additional_envs() -> List[str]: 1676 | return ["IPERF_CONGESTION=cubic"] 1677 | 1678 | @staticmethod 1679 | def additional_containers() -> List[str]: 1680 | return ["iperf_server", "iperf_client"] 1681 | 1682 | 1683 | TESTCASES = [ 1684 | TestCaseHandshake, 1685 | TestCaseTransfer, 1686 | TestCaseLongRTT, 1687 | TestCaseChaCha20, 1688 | TestCaseMultiplexing, 1689 | TestCaseRetry, 1690 | TestCaseResumption, 1691 | TestCaseZeroRTT, 1692 | TestCaseHTTP3, 1693 | TestCaseBlackhole, 1694 | TestCaseKeyUpdate, 1695 | TestCaseECN, 1696 | TestCaseAmplificationLimit, 1697 | TestCaseHandshakeLoss, 1698 | TestCaseTransferLoss, 1699 | TestCaseHandshakeCorruption, 1700 | TestCaseTransferCorruption, 1701 | TestCaseIPv6, 1702 | TestCaseV2, 1703 | TestCasePortRebinding, 1704 | TestCaseAddressRebinding, 1705 | TestCaseConnectionMigration, 1706 | ] 1707 | 1708 | MEASUREMENTS = [ 1709 | MeasurementGoodput, 1710 | MeasurementCrossTraffic, 1711 | ] 1712 | -------------------------------------------------------------------------------- /trace.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | from enum import Enum 4 | from typing import List, Optional, Tuple 5 | 6 | import pyshark 7 | 8 | IP4_CLIENT = "193.167.0.100" 9 | IP4_SERVER = "193.167.100.100" 10 | IP6_CLIENT = "fd00:cafe:cafe:0::100" 11 | IP6_SERVER = "fd00:cafe:cafe:100::100" 12 | 13 | 14 | QUIC_V2 = hex(0x6B3343CF) 15 | 16 | 17 | class Direction(Enum): 18 | ALL = 0 19 | FROM_CLIENT = 1 20 | FROM_SERVER = 2 21 | INVALID = 3 22 | 23 | 24 | class PacketType(Enum): 25 | INITIAL = 1 26 | HANDSHAKE = 2 27 | ZERORTT = 3 28 | RETRY = 4 29 | ONERTT = 5 30 | VERSIONNEGOTIATION = 6 31 | INVALID = 7 32 | 33 | 34 | WIRESHARK_PACKET_TYPES = { 35 | PacketType.INITIAL: "0", 36 | PacketType.ZERORTT: "1", 37 | PacketType.HANDSHAKE: "2", 38 | PacketType.RETRY: "3", 39 | } 40 | 41 | 42 | WIRESHARK_PACKET_TYPES_V2 = { 43 | PacketType.INITIAL: "1", 44 | PacketType.ZERORTT: "2", 45 | PacketType.HANDSHAKE: "3", 46 | PacketType.RETRY: "0", 47 | } 48 | 49 | 50 | def get_direction(p) -> Direction: 51 | if (hasattr(p, "ip") and p.ip.src == IP4_CLIENT) or ( 52 | hasattr(p, "ipv6") and p.ipv6.src == IP6_CLIENT 53 | ): 54 | return Direction.FROM_CLIENT 55 | 56 | if (hasattr(p, "ip") and p.ip.src == IP4_SERVER) or ( 57 | hasattr(p, "ipv6") and p.ipv6.src == IP6_SERVER 58 | ): 59 | return Direction.FROM_SERVER 60 | 61 | return Direction.INVALID 62 | 63 | 64 | def get_packet_type(p) -> PacketType: 65 | if p.quic.header_form == "0": 66 | return PacketType.ONERTT 67 | if p.quic.version == "0x00000000": 68 | return PacketType.VERSIONNEGOTIATION 69 | if p.quic.version == QUIC_V2: 70 | for t, num in WIRESHARK_PACKET_TYPES_V2.items(): 71 | if p.quic.long_packet_type_v2 == num: 72 | return t 73 | return PacketType.INVALID 74 | for t, num in WIRESHARK_PACKET_TYPES.items(): 75 | if p.quic.long_packet_type == num: 76 | return t 77 | return PacketType.INVALID 78 | 79 | 80 | class TraceAnalyzer: 81 | _filename = "" 82 | 83 | def __init__(self, filename: str, keylog_file: Optional[str] = None): 84 | self._filename = filename 85 | self._keylog_file = keylog_file 86 | 87 | def _get_direction_filter(self, d: Direction) -> str: 88 | f = "(quic && !icmp) && " 89 | if d == Direction.FROM_CLIENT: 90 | return ( 91 | f + "(ip.src==" + IP4_CLIENT + " || ipv6.src==" + IP6_CLIENT + ") && " 92 | ) 93 | elif d == 
Direction.FROM_SERVER: 94 | return ( 95 | f + "(ip.src==" + IP4_SERVER + " || ipv6.src==" + IP6_SERVER + ") && " 96 | ) 97 | else: 98 | return f 99 | 100 | def _get_packets(self, f: str) -> List: 101 | override_prefs = {} 102 | if self._keylog_file is not None: 103 | override_prefs["tls.keylog_file"] = self._keylog_file 104 | cap = pyshark.FileCapture( 105 | self._filename, 106 | display_filter=f, 107 | override_prefs=override_prefs, 108 | disable_protocol="http3", # see https://github.com/quic-interop/quic-interop-runner/pull/179 109 | decode_as={"udp.port==443": "quic"}, 110 | ) 111 | packets = [] 112 | # If the pcap has been cut short in the middle of the packet, pyshark will crash. 113 | # See https://github.com/KimiNewt/pyshark/issues/390. 114 | try: 115 | for p in cap: 116 | if "quic" not in p: 117 | logging.info("Captured packet without quic layer: %r", p) 118 | continue 119 | packets.append(p) 120 | cap.close() 121 | except Exception as e: 122 | logging.debug(e) 123 | 124 | if self._keylog_file is not None: 125 | for p in packets: 126 | if hasattr(p["quic"], "decryption_failed"): 127 | logging.info("At least one QUIC packet could not be decrypted") 128 | logging.debug(p) 129 | break 130 | return packets 131 | 132 | def get_raw_packets(self, direction: Direction = Direction.ALL) -> List: 133 | packets = [] 134 | for packet in self._get_packets(self._get_direction_filter(direction) + "quic"): 135 | packets.append(packet) 136 | return packets 137 | 138 | def get_1rtt(self, direction: Direction = Direction.ALL) -> List: 139 | """Get all 1-RTT packets, one or both directions.""" 140 | packets, _, _ = self.get_1rtt_sniff_times(direction) 141 | return packets 142 | 143 | def get_1rtt_sniff_times( 144 | self, direction: Direction = Direction.ALL 145 | ) -> Tuple[List, datetime.datetime, datetime.datetime]: 146 | """Get all 1-RTT packets, one or both directions, and the first and last sniff times.""" 147 | packets = [] 148 | first, last = 0, 0 149 | for packet in self._get_packets( 150 | self._get_direction_filter(direction) + "quic.header_form==0" 151 | ): 152 | for layer in packet.layers: 153 | if ( 154 | layer.layer_name == "quic" 155 | and not hasattr(layer, "long_packet_type") 156 | and not hasattr(layer, "long_packet_type_v2") 157 | ): 158 | if first == 0: 159 | first = packet.sniff_time 160 | last = packet.sniff_time 161 | packets.append(layer) 162 | return packets, first, last 163 | 164 | def get_vnp(self, direction: Direction = Direction.ALL) -> List: 165 | return self._get_packets( 166 | self._get_direction_filter(direction) + "quic.version==0" 167 | ) 168 | 169 | def _get_long_header_packets( 170 | self, packet_type: PacketType, direction: Direction 171 | ) -> List: 172 | packets = [] 173 | for packet in self._get_packets( 174 | self._get_direction_filter(direction) 175 | + "(quic.long.packet_type || quic.long.packet_type_v2)" 176 | ): 177 | for layer in packet.layers: 178 | if layer.layer_name == "quic" and ( 179 | ( 180 | hasattr(layer, "long_packet_type") 181 | and layer.long_packet_type 182 | == WIRESHARK_PACKET_TYPES[packet_type] 183 | ) 184 | or ( 185 | hasattr(layer, "long_packet_type_v2") 186 | and layer.long_packet_type_v2 187 | == WIRESHARK_PACKET_TYPES_V2[packet_type] 188 | ) 189 | ): 190 | packets.append(layer) 191 | return packets 192 | 193 | def get_initial(self, direction: Direction = Direction.ALL) -> List: 194 | """Get all Initial packets.""" 195 | return self._get_long_header_packets(PacketType.INITIAL, direction) 196 | 197 | def get_retry(self, direction: Direction = 
Direction.ALL) -> List: 198 | """Get all Retry packets.""" 199 | return self._get_long_header_packets(PacketType.RETRY, direction) 200 | 201 | def get_handshake(self, direction: Direction = Direction.ALL) -> List: 202 | """Get all Handshake packets.""" 203 | return self._get_long_header_packets(PacketType.HANDSHAKE, direction) 204 | 205 | def get_0rtt(self) -> List: 206 | """Get all 0-RTT packets.""" 207 | return self._get_long_header_packets(PacketType.ZERORTT, Direction.FROM_CLIENT) 208 | -------------------------------------------------------------------------------- /web/Caddyfile: -------------------------------------------------------------------------------- 1 | interop.seemann.io:443 2 | 3 | # The website must be mounted at /var/www/web. 4 | # The log directory must be mounted at /var/www/logs. 5 | root /logs/* /var/www/ 6 | root * /var/www/web 7 | 8 | # Allow external sites to download our JSON files. 9 | @json { 10 | path *.json 11 | } 12 | handle @json { 13 | header Access-Control-Allow-Origin "*" 14 | } 15 | 16 | # Allow external sites to download our qlog files. 17 | @qlog { 18 | path *.qlog 19 | } 20 | handle @qlog { 21 | header Access-Control-Allow-Origin "*" 22 | } 23 | 24 | # Allow external sites to download our sqlog files. 25 | @sqlog { 26 | path *.sqlog 27 | } 28 | handle @sqlog { 29 | header Access-Control-Allow-Origin "*" 30 | } 31 | 32 | file_server browse 33 | -------------------------------------------------------------------------------- /web/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is used to delete old logs from the log directory. 4 | # It also removes the deleted directories from logs.json. 5 | 6 | die() { 7 | echo "$0 <log dir> <max age in days>" 8 | exit 1 9 | } 10 | 11 | if [ -z "$1" ] || [ -z "$2" ]; then 12 | die 13 | fi 14 | 15 | LOGDIR=$1 16 | AGE=$2 17 | 18 | find "$LOGDIR" -maxdepth 1 -type d -mtime "+$AGE" | while read -r line; do 19 | DIR=$(basename "$line") 20 | echo "Deleting $DIR" 21 | jq ". - [ \"$DIR\" ]" "$LOGDIR/logs.json" | sponge "$LOGDIR/logs.json" 22 | rm -rf "$line" 23 | done 24 |
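The same pruning that cleanup.sh above performs with find, jq and sponge is sketched below in Python, purely for illustration (this helper is not part of the repository; the shell script is what actually runs): run directories older than the given number of days are deleted, and their names are dropped from logs.json so the run selector on the website no longer offers them.

# Illustrative sketch only; the shell script above is the real implementation.
import json
import shutil
import time
from pathlib import Path


def cleanup(log_dir: str, max_age_days: int) -> None:
    root = Path(log_dir)
    logs_json = root / "logs.json"
    runs = json.loads(logs_json.read_text())
    cutoff = time.time() - max_age_days * 24 * 3600
    for entry in root.iterdir():
        # only prune top-level run directories, mirroring find -maxdepth 1 -type d
        if entry.is_dir() and entry.stat().st_mtime < cutoff:
            print("Deleting", entry.name)
            runs = [r for r in runs if r != entry.name]
            shutil.rmtree(entry)
    logs_json.write_text(json.dumps(runs))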
-------------------------------------------------------------------------------- /web/index.html: --------------------------------------------------------------------------------
(Text content of the page; the markup itself is not shown.)
QUIC Interop Runner (page title), with a "Fork me on GitHub" link to the repository.
About
This page documents the current interop status of various QUIC client and server implementations that have chosen to participate in this automated testing. It is updated several times per day; older results can be accessed via the "Run" selector in the title bar.
In the following tables, results for client implementations are shown horizontally, results for server implementations are shown vertically. The results were obtained with QUIC version (filled in by script.js).
It is straightforward to add your implementation to this automated testing effort; please see these simple instructions.
Results Filter
Client: / Server: / Test: (filter buttons, populated by script.js)
Interop Status (results table #interop, populated by script.js)
Measurement Results (measurements table #measurements, populated by script.js)
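The page above is populated by web/script.js (next file), which fetches logs/<run>/result.json for the selected run and logs/logs.json for the list of available runs. The snippet below is a hand-written illustration of the result.json fields that script.js reads; every value is invented, and the real files are produced by the interop runner rather than by this snippet.

# Illustration only: field names taken from what script.js accesses; all values are made up.
example_result = {
    "start_time": 1700000000,        # Unix seconds; rendered as the run's start time
    "end_time": 1700003600,          # Unix seconds; rendered as the run's end time
    "log_dir": "logs_2023-11-14",    # run directory; the "logs_" prefix is stripped for display
    "quic_version": "0x1",
    "quic_draft": 34,
    "servers": ["serverA"],
    "clients": ["clientB"],
    "urls": {"serverA": "https://example.com/a", "clientB": "https://example.com/b"},
    "tests": {"H": {"name": "handshake", "desc": "..."}},   # keyed by test abbreviation
    "results": [[{"name": "handshake", "abbr": "H", "result": "succeeded"}]],
    "measurements": [[{"name": "goodput", "abbr": "G", "result": "succeeded",
                       "details": "1234 kbps"}]],
}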
100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | -------------------------------------------------------------------------------- /web/script.js: -------------------------------------------------------------------------------- 1 | /* globals document, window, console, URLSearchParams, XMLHttpRequest, $, history */ 2 | 3 | (function() { 4 | "use strict"; 5 | const map = { client: {}, server: {}, test: {} }; 6 | const color_type = { succeeded: "success", unsupported: "secondary disabled", failed: "danger"}; 7 | 8 | // see https://stackoverflow.com/a/43466724/ 9 | function formatTime(seconds) { 10 | return [ 11 | parseInt(seconds / 60 / 60), 12 | parseInt(seconds / 60 % 60), 13 | parseInt(seconds % 60) 14 | ].join(":").replace(/\b(\d)\b/g, "0$1"); 15 | } 16 | 17 | function getLogLink(log_dir, server, client, test, text, res) { 18 | var ttip = "Test: " + test + "
" + 19 | "Client: " + client + "
" + 20 | "Server: " + server + "
" + 21 | "Result: " + res + ""; 22 | 23 | var a = document.createElement("a"); 24 | a.className = "btn btn-xs btn-" + color_type[res] + " " + res + " test-" + text.toLowerCase(); 25 | var ttip_target = a; 26 | if (res !== "unsupported") { 27 | a.href = "logs/" + log_dir + "/" + server + "_" + client + "/" + test; 28 | a.target = "_blank"; 29 | ttip += "

(Click for logs.)"; 30 | } else { 31 | var s = document.createElement("span"); 32 | s.className = "d-inline-block"; 33 | s.tabIndex = 0; 34 | a.style = "pointer-events: none;"; 35 | s.appendChild(a); 36 | ttip_target = s; 37 | } 38 | ttip_target.title = ttip; 39 | $(ttip_target).attr("data-toggle", "tooltip").attr("data-placement", "bottom").attr("data-html", true).tooltip(); 40 | $(ttip_target).click(function() { $(this).blur(); }); 41 | a.appendChild(document.createTextNode(text)); 42 | return ttip_target; 43 | } 44 | 45 | function makeClickable(e, url) { 46 | e.title = url; 47 | $(e).attr("role", "button").attr("data-href", url).attr("data-toggle", "tooltip").tooltip(); 48 | e.onclick = function(e) { window.open(e.target.getAttribute("data-href")); }; 49 | } 50 | 51 | function makeColumnHeaders(t, result) { 52 | for(var i = 0; i <= result.servers.length; i++) 53 | t.appendChild(document.createElement("colgroup")); 54 | var thead = t.createTHead(); 55 | var row = thead.insertRow(0); 56 | var cell = document.createElement("th"); 57 | row.appendChild(cell); 58 | cell.scope = "col"; 59 | cell.className = "table-light client-any"; 60 | for(var i = 0; i < result.servers.length; i++) { 61 | cell = document.createElement("th"); 62 | row.appendChild(cell); 63 | cell.scope = "col"; 64 | cell.className = "table-light server-" + result.servers[i]; 65 | if (result.hasOwnProperty("urls")) 66 | makeClickable(cell, result.urls[result.servers[i]]); 67 | cell.innerHTML = result.servers[i]; 68 | } 69 | } 70 | 71 | function makeRowHeader(tbody, result, i) { 72 | var row = tbody.insertRow(i); 73 | var cell = document.createElement("th"); 74 | cell.scope = "row"; 75 | cell.className = "table-light client-" + result.clients[i]; 76 | if (result.hasOwnProperty("urls")) 77 | makeClickable(cell, result.urls[result.clients[i]]); 78 | cell.innerHTML = result.clients[i]; 79 | row.appendChild(cell); 80 | return row; 81 | } 82 | 83 | function fillInteropTable(result) { 84 | var index = 0; 85 | var appendResult = function(el, res, i, j) { 86 | result.results[index].forEach(function(item) { 87 | if(item.result !== res) return; 88 | el.appendChild(getLogLink(result.log_dir, result.servers[j], result.clients[i], item.name, item.abbr, res)); 89 | }); 90 | }; 91 | 92 | var t = document.getElementById("interop"); 93 | t.innerHTML = ""; 94 | makeColumnHeaders(t, result); 95 | var tbody = t.createTBody(); 96 | for(var i = 0; i < result.clients.length; i++) { 97 | var row = makeRowHeader(tbody, result, i); 98 | for(var j = 0; j < result.servers.length; j++) { 99 | var cell = row.insertCell(j+1); 100 | cell.className = "server-" + result.servers[j] + " client-" + result.clients[i]; 101 | appendResult(cell, "succeeded", i, j); 102 | appendResult(cell, "unsupported", i, j); 103 | appendResult(cell, "failed", i, j); 104 | index++; 105 | } 106 | } 107 | } 108 | 109 | function fillMeasurementTable(result) { 110 | var t = document.getElementById("measurements"); 111 | t.innerHTML = ""; 112 | makeColumnHeaders(t, result); 113 | var tbody = t.createTBody(); 114 | var index = 0; 115 | for(var i = 0; i < result.clients.length; i++) { 116 | var row = makeRowHeader(tbody, result, i); 117 | for(var j = 0; j < result.servers.length; j++) { 118 | var res = result.measurements[index]; 119 | var cell = row.insertCell(j+1); 120 | cell.className = "server-" + result.servers[j] + " client-" + result.clients[i]; 121 | for(var k = 0; k < res.length; k++) { 122 | var measurement = res[k]; 123 | var link = getLogLink(result.log_dir, result.servers[j], 
result.clients[i], measurement.name, measurement.abbr, measurement.result); 124 | if (measurement.result === "succeeded") 125 | link.innerHTML += ": " + measurement.details; 126 | cell.appendChild(link); 127 | } 128 | index++; 129 | } 130 | } 131 | } 132 | 133 | function dateToString(date) { 134 | return date.toLocaleDateString("en-US", { timeZone: 'UTC' }) + " " + date.toLocaleTimeString("en-US", { timeZone: 'UTC', timeZoneName: 'short' }); 135 | } 136 | 137 | function makeButton(type, text, tooltip) { 138 | var b = document.createElement("button"); 139 | b.innerHTML = text; 140 | b.id = type + "-" + text.toLowerCase(); 141 | if (tooltip) { 142 | b.title = tooltip; 143 | $(b).attr("data-toggle", "tooltip").attr("data-placement", "bottom").attr("data-html", true).tooltip(); 144 | } 145 | b.type = "button"; 146 | b.className = type + " btn btn-light"; 147 | $(b).click(clickButton); 148 | return b; 149 | } 150 | 151 | function toggleHighlight(e) { 152 | const comp = e.target.id.split("-"); 153 | const which = "." + comp[0] + "-" + comp[1] + "." + comp[2]; 154 | $(which).toggleClass("btn-highlight"); 155 | } 156 | 157 | function setButtonState() { 158 | var params = new URLSearchParams(history.state ? history.state.path : window.location.search); 159 | var show = {}; 160 | Object.keys(map).forEach(type => { 161 | map[type] = params.getAll(type).map(x => x.toLowerCase().split(",")).flat(); 162 | if (map[type].length === 0) 163 | map[type] = $("#" + type + " :button").get().map(x => x.id.replace(type + "-", "")); 164 | $("#" + type + " :button").removeClass("active font-weight-bold").addClass("text-muted font-weight-light").filter((i, e) => map[type].includes(e.id.replace(type + "-", ""))).addClass("active font-weight-bold").removeClass("text-muted font-weight-light"); 165 | show[type] = map[type].map(e => "." + type + "-" + e); 166 | }); 167 | 168 | $(".result td").add(".result th").add(".result td a").hide(); 169 | 170 | const show_classes = show.client.map(el1 => show.server.map(el2 => el1 + el2)).flat().join(); 171 | $(".client-any," + show_classes).show(); 172 | 173 | $(".result " + show.client.map(e => "th" + e).join()).show(); 174 | $(".result " + show.server.map(e => "th" + e).join()).show(); 175 | $(".measurement," + show.test.join()).show(); 176 | 177 | $("#test :button").each((i, e) => { 178 | $(e).find("span,br").remove(); 179 | var count = { succeeded: 0, unsupported: 0, failed: 0}; 180 | Object.keys(count).map(c => count[c] = $(".btn." + e.id + "." + c + ":visible").length); 181 | Object.keys(count).map(c => { 182 | e.appendChild(document.createElement("br")); 183 | var b = document.createElement("span"); 184 | b.innerHTML = count[c]; 185 | b.className = "btn btn-xs btn-" + color_type[c]; 186 | if (e.classList.contains("active") === false) 187 | b.className += " disabled"; 188 | b.id = e.id + "-" + c; 189 | $(b).hover(toggleHighlight, toggleHighlight); 190 | e.appendChild(b); 191 | }); 192 | }); 193 | } 194 | 195 | function clickButton(e) { 196 | function toggle(array, value) { 197 | var index = array.indexOf(value); 198 | if (index === -1) 199 | array.push(value); 200 | else 201 | array.splice(index, 1); 202 | } 203 | 204 | var b = $(e.target).closest(":button")[0]; 205 | b.blur(); 206 | const type = [...b.classList].filter(x => Object.keys(map).includes(x))[0]; 207 | const which = b.id.replace(type + "-", ""); 208 | 209 | var params = new URLSearchParams(history.state ? 
history.state.path : window.location.search); 210 | if (params.has(type) && params.get(type)) 211 | map[type] = params.get(type).split(","); 212 | else 213 | map[type] = $("#" + type + " :button").get().map(e => e.id.replace(type + "-", "")); 214 | 215 | toggle(map[type], which); 216 | params.set(type, map[type]); 217 | if (map[type].length === $("#" + type + " :button").length) 218 | params.delete(type); 219 | 220 | const comp = decodeURIComponent(params.toString()); 221 | var refresh = window.location.protocol + "//" + window.location.host + window.location.pathname + (comp ? "?" + comp : ""); 222 | window.history.pushState(null, null, refresh); 223 | 224 | setButtonState(); 225 | return false; 226 | } 227 | 228 | function makeTooltip(name, desc) { 229 | return "" + name + "" + (desc === undefined ? "" : "
" + desc); 230 | } 231 | 232 | function process(result) { 233 | var startTime = new Date(1000*result.start_time); 234 | var endTime = new Date(1000*result.end_time); 235 | var duration = result.end_time - result.start_time; 236 | document.getElementById("lastrun-start").innerHTML = dateToString(startTime); 237 | document.getElementById("lastrun-end").innerHTML = dateToString(endTime); 238 | document.getElementById("duration").innerHTML = formatTime(duration); 239 | document.getElementById("quic-vers").innerHTML = 240 | "" + result.quic_version + " (\"draft-" + result.quic_draft + "\")"; 241 | 242 | fillInteropTable(result); 243 | fillMeasurementTable(result); 244 | 245 | $("#client").add("#server").add("#test").empty(); 246 | $("#client").append(result.clients.map(e => makeButton("client", e))); 247 | $("#server").append(result.servers.map(e => makeButton("server", e))); 248 | if (result.hasOwnProperty("tests")) 249 | $("#test").append(Object.keys(result.tests).map(e => makeButton("test", e, makeTooltip(result.tests[e].name, result.tests[e].desc)))); 250 | else { 251 | // TODO: this else can eventually be removed, when all past runs have the test descriptions in the json 252 | const tcases = result.results.concat(result.measurements).flat().map(x => [x.abbr, x.name]).filter((e, i, a) => a.map(x => x[0]).indexOf(e[0]) === i); 253 | $("#test").append(tcases.map(e => makeButton("test", e[0], makeTooltip(e[1])))); 254 | } 255 | setButtonState(); 256 | 257 | $("table.result").delegate("td", "mouseover mouseleave", function(e) { 258 | const t = $(this).closest("table.result"); 259 | if (e.type === "mouseover") { 260 | $(this).parent().addClass("hover-xy"); 261 | t.children("colgroup").eq($(this).index()).addClass("hover-xy"); 262 | t.find("th").eq($(this).index()).addClass("hover-xy"); 263 | } else { 264 | $(this).parent().removeClass("hover-xy"); 265 | t.children("colgroup").eq($(this).index()).removeClass("hover-xy"); 266 | t.find("th").eq($(this).index()).removeClass("hover-xy"); 267 | } 268 | }); 269 | } 270 | 271 | function load(dir) { 272 | document.getElementsByTagName("body")[0].classList.add("loading"); 273 | document.getElementById("run-selection-msg").innerHTML = ""; 274 | var xhr = new XMLHttpRequest(); 275 | xhr.responseType = 'json'; 276 | xhr.open('GET', 'logs/' + dir + '/result.json'); 277 | xhr.onreadystatechange = function() { 278 | if(xhr.readyState !== XMLHttpRequest.DONE) return; 279 | if(xhr.status !== 200) { 280 | console.log("Received status: ", xhr.status); 281 | var run = dir.replace("logs_", ""); 282 | var errMsg = 'Error: could not locate result for "' + run + '" run'; 283 | document.getElementById("run-selection-msg").innerHTML = errMsg; 284 | var refresh = window.location.protocol + "//" + window.location.host + window.location.pathname + "?run=" + run; 285 | window.history.pushState(null, null, refresh); 286 | return; 287 | } 288 | var result = xhr.response; 289 | var selectedRun = result.log_dir.replace("logs_", ""); 290 | var refresh = window.location.protocol + "//" + window.location.host + window.location.pathname + "?run=" + selectedRun; 291 | window.history.pushState(null, null, refresh); 292 | process(result); 293 | document.getElementsByTagName("body")[0].classList.remove("loading"); 294 | }; 295 | xhr.send(); 296 | } 297 | 298 | var selectedRun = null; 299 | var queryParams = (new URL(document.location)).searchParams; 300 | if (queryParams.has("run") === true) { 301 | // if the request used a specific run (like ?run=123), then 302 | // load that specifc 
one 303 | selectedRun = queryParams.get("run") 304 | load("logs_" + selectedRun); 305 | } else { 306 | load("latest"); 307 | } 308 | 309 | // enable loading of old runs 310 | var xhr = new XMLHttpRequest(); 311 | xhr.responseType = 'json'; 312 | xhr.open('GET', 'logs/logs.json'); 313 | xhr.onreadystatechange = function() { 314 | if(xhr.readyState !== XMLHttpRequest.DONE) return; 315 | if(xhr.status !== 200) { 316 | console.log("Received status: ", xhr.status); 317 | return; 318 | } 319 | var s = document.createElement("select"); 320 | xhr.response.reverse().forEach(function(el) { 321 | var opt = document.createElement("option"); 322 | opt.innerHTML = el.replace("logs_", ""); 323 | opt.value = el; 324 | s.appendChild(opt); 325 | }); 326 | s.addEventListener("change", function(ev) { 327 | load(ev.currentTarget.value); 328 | }); 329 | document.getElementById("available-runs").appendChild(s); 330 | if (selectedRun != null) { 331 | // just set the selected run, no need to trigger "change" 332 | // event here 333 | s.value = "logs_" + selectedRun; 334 | } 335 | }; 336 | xhr.send(); 337 | })(); 338 | -------------------------------------------------------------------------------- /web/styles.css: -------------------------------------------------------------------------------- 1 | body.loading table.result, 2 | body.loading .navbar-nav .start-time, 3 | body.loading .navbar-nav .duration, 4 | body.loading .navbar-nav .end-time { 5 | opacity: 0.3; 6 | } 7 | 8 | .btn-xs { 9 | margin: 0 .1rem .1rem 0; 10 | padding: .01rem .025rem .01rem .025rem; 11 | font-size: .7rem; 12 | border-width: 2px; 13 | border-radius: 4px; 14 | font-weight: bold; 15 | } 16 | 17 | .btn-highlight { 18 | background-color: yellow !important; 19 | color: black !important; 20 | } 21 | 22 | .hover-xy { 23 | background-color: #ecf0f1; 24 | color: #e74c3c; 25 | } 26 | 27 | .table th { 28 | vertical-align: middle; 29 | } 30 | tr.hover-xy > th, th.hover-xy { 31 | background-color: #cfd9db; 32 | color: #e74c3c; 33 | } 34 | 35 | .sponsor { 36 | width: max-content; 37 | 38 | .btn { 39 | color: black; 40 | text-decoration: none; 41 | margin-bottom: 5px; 42 | 43 | i { 44 | margin-right: 5px; 45 | } 46 | } 47 | } 48 | --------------------------------------------------------------------------------