├── .github ├── actions │ ├── run-interop-hole-punch-test │ │ └── action.yml │ ├── run-interop-ping-test │ │ └── action.yml │ ├── run-perf-benchmark │ │ └── action.yml │ └── run-transport-interop-test │ │ └── action.yml ├── dependabot.yml └── workflows │ ├── add-new-impl-versions.yml │ ├── generated-pr.yml │ ├── hole-punch-interop.yml │ ├── perf.yml │ ├── stale.yml │ ├── transport-interop.yml │ └── update-badge.yml ├── .gitignore ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── ROADMAP.md ├── funding.json ├── hole-punch-interop ├── .gitignore ├── Makefile ├── README.md ├── compose-spec │ ├── compose-spec.json │ └── compose-spec.ts ├── dockerBuildWrapper.sh ├── helpers │ └── cache.ts ├── impl │ └── rust │ │ ├── .gitignore │ │ └── v0.53 │ │ └── Makefile ├── package-lock.json ├── package.json ├── renderResults.ts ├── router │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ └── run.sh ├── rust-relay │ ├── .dockerignore │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ ├── Dockerfile │ ├── Makefile │ └── src │ │ └── main.rs ├── src │ ├── compose-runner.ts │ ├── generator.ts │ ├── lib.ts │ └── stdoutParser.test.ts ├── testplans.ts ├── tsconfig.json └── versions.ts ├── perf ├── Makefile ├── README.md ├── impl │ ├── Makefile │ ├── go-libp2p │ │ ├── .gitignore │ │ └── v0.41 │ │ │ ├── .gitignore │ │ │ ├── Makefile │ │ │ ├── go.mod │ │ │ ├── go.sum │ │ │ ├── main.go │ │ │ └── perf.go │ ├── https │ │ ├── .gitignore │ │ └── v0.1 │ │ │ ├── Makefile │ │ │ ├── go.mod │ │ │ ├── go.sum │ │ │ └── main.go │ ├── js-libp2p │ │ ├── .gitignore │ │ └── v2.8 │ │ │ ├── Makefile │ │ │ ├── index.js │ │ │ ├── package-lock.json │ │ │ ├── package.json │ │ │ └── perf │ ├── quic-go │ │ ├── .gitignore │ │ └── v0.45 │ │ │ └── Makefile │ └── rust-libp2p │ │ ├── .gitignore │ │ └── v0.55 │ │ └── Makefile ├── runner │ ├── .gitignore │ ├── benchmark-results.json │ ├── package-lock.json │ ├── package.json │ ├── src │ │ ├── benchmark-result-type.ts │ │ ├── index.ts │ │ └── versions.ts │ ├── tsconfig.json │ └── versionsInput.json └── terraform │ ├── .gitignore │ ├── configs │ ├── README.md │ ├── local │ │ ├── .terraform.lock.hcl │ │ └── terraform.tf │ └── remote │ │ ├── .terraform.lock.hcl │ │ ├── terraform.tf │ │ └── terraform_override.tf │ └── modules │ ├── ci │ ├── cleanup.tf │ ├── files │ │ ├── .gitignore │ │ └── cleanup.py │ ├── main.tf │ └── test │ │ ├── cleanup.json │ │ ├── cleanup.sh │ │ └── cleanup.yml │ ├── long_lived │ ├── files │ │ └── user-data.sh │ └── main.tf │ └── short_lived │ ├── files │ └── .gitignore │ └── main.tf └── transport-interop ├── .gitignore ├── Makefile ├── README.md ├── compose-spec ├── compose-spec.json └── compose-spec.ts ├── dockerBuildWrapper.sh ├── helpers └── cache.ts ├── impl ├── go │ ├── .gitignore │ ├── v0.36 │ │ ├── Makefile │ │ └── version.lock │ ├── v0.37 │ │ ├── Makefile │ │ └── version.lock │ ├── v0.39 │ │ ├── Makefile │ │ └── version.lock │ └── v0.40 │ │ ├── Makefile │ │ └── version.lock ├── java │ ├── .gitignore │ ├── v0.0.1 │ │ └── Makefile │ ├── v0.6 │ │ └── Makefile │ └── v0.9 │ │ └── Makefile ├── js │ ├── .gitignore │ ├── v1.x │ │ ├── .aegir.js │ │ ├── BrowserDockerfile │ │ ├── Dockerfile │ │ ├── Makefile │ │ ├── package-lock.json │ │ ├── package.json │ │ ├── src │ │ │ └── index.ts │ │ ├── test │ │ │ ├── dialer.spec.ts │ │ │ ├── fixtures │ │ │ │ ├── get-libp2p.ts │ │ │ │ ├── redis-proxy.ts │ │ │ │ └── relay.ts │ │ │ └── listener.spec.ts │ │ └── tsconfig.json │ └── v2.x │ │ ├── .aegir.js │ │ ├── BrowserDockerfile │ │ ├── Dockerfile │ │ ├── Makefile │ │ ├── package-lock.json │ 
│ ├── package.json │ │ ├── src │ │ └── index.ts │ │ ├── test │ │ ├── dialer.spec.ts │ │ ├── fixtures │ │ │ ├── get-libp2p.ts │ │ │ ├── redis-proxy.ts │ │ │ └── relay.ts │ │ └── listener.spec.ts │ │ └── tsconfig.json ├── nim │ ├── mainv1.nim │ └── v1.0 │ │ ├── .gitignore │ │ ├── Dockerfile │ │ └── Makefile ├── rust-chromium │ ├── .gitignore │ ├── v0.53 │ │ └── Makefile │ └── v0.54 │ │ └── Makefile ├── rust │ ├── .gitignore │ ├── v0.53 │ │ └── Makefile │ └── v0.54 │ │ └── Makefile └── zig │ ├── .gitignore │ └── v0.0.1 │ └── Makefile ├── package-lock.json ├── package.json ├── renderResults.ts ├── src ├── compose-runner.ts ├── compose-stdout-helper.ts ├── generator.ts └── lib.ts ├── testplans.ts ├── tsconfig.json ├── versions.ts └── versionsInput.json /.github/actions/run-interop-hole-punch-test/action.yml: -------------------------------------------------------------------------------- 1 | name: "libp2p hole-punch interop test" 2 | description: "Run the libp2p hole-punch interoperability test suite" 3 | inputs: 4 | test-filter: 5 | description: "Filter which tests to run out of the created matrix" 6 | required: false 7 | default: "" 8 | test-ignore: 9 | description: "Exclude tests from the created matrix that include this string in their name" 10 | required: false 11 | default: "" 12 | extra-versions: 13 | description: "Space-separated paths to JSON files describing additional images" 14 | required: false 15 | default: "" 16 | s3-cache-bucket: 17 | description: "Which S3 bucket to use for container layer caching" 18 | required: false 19 | default: "" 20 | s3-access-key-id: 21 | description: "S3 Access key id for the cache" 22 | required: false 23 | default: "" 24 | s3-secret-access-key: 25 | description: "S3 secret key id for the cache" 26 | required: false 27 | default: "" 28 | aws-region: 29 | description: "Which AWS region to use" 30 | required: false 31 | default: "us-east-1" 32 | worker-count: 33 | description: "How many workers to use for the test" 34 | required: false 35 | default: "2" 36 | runs: 37 | using: "composite" 38 | steps: 39 | - name: Configure AWS credentials for S3 build cache 40 | if: inputs.s3-access-key-id != '' && inputs.s3-secret-access-key != '' 41 | run: | 42 | echo "PUSH_CACHE=true" >> $GITHUB_ENV 43 | shell: bash 44 | 45 | # This depends on where this file is within this repository. This walks up 46 | # from here to the hole-punch-interop folder 47 | - run: | 48 | WORK_DIR=$(realpath "$GITHUB_ACTION_PATH/../../../hole-punch-interop") 49 | echo "WORK_DIR=$WORK_DIR" >> $GITHUB_OUTPUT 50 | shell: bash 51 | id: find-workdir 52 | 53 | - uses: actions/setup-node@v4 54 | with: 55 | node-version: lts/* 56 | 57 | # Existence of /etc/buildkit/buildkitd.toml indicates that this is a 58 | # self-hosted runner. If so, we need to pass the config to the buildx 59 | # action. The config enables docker.io proxy which is required to 60 | # work around docker hub rate limiting. 
61 | - run: | 62 | if test -f /etc/buildkit/buildkitd.toml; then 63 | echo "config=/etc/buildkit/buildkitd.toml" >> $GITHUB_OUTPUT 64 | fi 65 | shell: bash 66 | id: buildkit 67 | 68 | - name: Install more recent docker-compose version # https://stackoverflow.com/questions/54331949/having-networking-issues-with-docker-compose 69 | shell: bash 70 | run: | 71 | mkdir -p $HOME/.docker/cli-plugins 72 | wget -q -O- https://github.com/docker/compose/releases/download/v2.21.0/docker-compose-linux-x86_64 > $HOME/.docker/cli-plugins/docker-compose 73 | chmod +x $HOME/.docker/cli-plugins/docker-compose 74 | docker compose version 75 | 76 | - name: Set up Docker Buildx 77 | id: buildx 78 | uses: docker/setup-buildx-action@v2 79 | with: 80 | config: ${{ steps.buildkit.outputs.config }} 81 | 82 | - name: Install deps 83 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 84 | run: npm ci 85 | shell: bash 86 | 87 | - name: Load cache and build 88 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 89 | env: 90 | AWS_BUCKET: ${{ inputs.s3-cache-bucket }} 91 | AWS_REGION: ${{ inputs.aws-region }} 92 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 93 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 94 | run: npm run cache -- load 95 | shell: bash 96 | 97 | - name: Assert Git tree is clean. 98 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 99 | shell: bash 100 | run: | 101 | if [[ -n "$(git status --porcelain)" ]]; then 102 | echo "Git tree is dirty. This means that building an impl generated something that should probably be .gitignore'd" 103 | git status 104 | exit 1 105 | fi 106 | 107 | - name: Push the image cache 108 | if: env.PUSH_CACHE == 'true' 109 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 110 | env: 111 | AWS_BUCKET: ${{ inputs.s3-cache-bucket }} 112 | AWS_REGION: ${{ inputs.aws-region }} 113 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 114 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 115 | run: npm run cache -- push 116 | shell: bash 117 | 118 | - name: Run the test 119 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 120 | env: 121 | WORKER_COUNT: ${{ inputs.worker-count }} 122 | EXTRA_VERSION: ${{ inputs.extra-versions }} 123 | NAME_FILTER: ${{ inputs.test-filter }} 124 | NAME_IGNORE: ${{ inputs.test-ignore }} 125 | run: npm run test -- --extra-version=$EXTRA_VERSION --name-filter=$NAME_FILTER --name-ignore=$NAME_IGNORE 126 | shell: bash 127 | 128 | - name: Print the results 129 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 130 | run: cat results.csv 131 | shell: bash 132 | 133 | - name: Render results 134 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 135 | run: npm run renderResults > ./dashboard.md 136 | shell: bash 137 | 138 | - name: Show Dashboard Output 139 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 140 | run: cat ./dashboard.md >> $GITHUB_STEP_SUMMARY 141 | shell: bash 142 | 143 | - name: Exit with Error 144 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 145 | run: | 146 | if grep -q ":red_circle:" ./dashboard.md; then 147 | exit 1 148 | else 149 | exit 0 150 | fi 151 | shell: bash 152 | 153 | - uses: actions/upload-artifact@v4 154 | if: ${{ always() }} 155 | with: 156 | name: test-plans-output 157 | path: | 158 | ${{ steps.find-workdir.outputs.WORK_DIR }}/results.csv 159 | ${{ steps.find-workdir.outputs.WORK_DIR }}/dashboard.md 160 | ${{ steps.find-workdir.outputs.WORK_DIR }}/runs 161 | 
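The `test-filter` and `test-ignore` inputs above are handed to the runner as `--name-filter`/`--name-ignore` and narrow the generated matrix by matching against test names of the form `implA x implB (options)`. As an illustration only, here is a TypeScript sketch of that semantics, assuming plain substring matching as the input descriptions suggest; the actual logic lives in the suite's `testplans.ts` tooling, `applyNameFilters` is a name invented here, and the transport-interop workflow passes a `|`-separated ignore list, so the real matcher may accept multiple patterns.

```typescript
// Illustrative sketch only; not the repository's actual filtering code.
// Assumes test names like "rust-v0.53 x rust-v0.53 (tcp, noise, yamux)".
function applyNameFilters(
  testNames: string[],
  nameFilter: string, // keep only names containing this substring; "" keeps all
  nameIgnore: string  // drop names containing this substring; "" drops none
): string[] {
  return testNames
    .filter((name) => nameFilter === '' || name.includes(nameFilter))
    .filter((name) => nameIgnore === '' || !name.includes(nameIgnore));
}

// Example: exclude one pairing while keeping everything else.
const remaining = applyNameFilters(
  ['java-v0.9 x zig-v0.0.1 (quic-v1)', 'rust-v0.53 x rust-v0.53 (tcp)'],
  '',
  'java-v0.9 x zig-v0.0.1 (quic-v1)'
);
// remaining === ['rust-v0.53 x rust-v0.53 (tcp)']
```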
-------------------------------------------------------------------------------- /.github/actions/run-interop-ping-test/action.yml: -------------------------------------------------------------------------------- 1 | name: "libp2p ping interop test" 2 | description: "Run the libp2p ping interoperability test suite" 3 | inputs: 4 | test-filter: 5 | description: "Filter which tests to run out of the created matrix" 6 | required: false 7 | default: "" 8 | test-ignore: 9 | description: "Exclude tests from the created matrix that include this string in their name" 10 | required: false 11 | default: "" 12 | extra-versions: 13 | description: "Space-separated paths to JSON files describing additional images" 14 | required: false 15 | default: "" 16 | s3-cache-bucket: 17 | description: "Which S3 bucket to use for container layer caching" 18 | required: false 19 | default: "" 20 | s3-access-key-id: 21 | description: "S3 Access key id for the cache" 22 | required: false 23 | default: "" 24 | s3-secret-access-key: 25 | description: "S3 secret key id for the cache" 26 | required: false 27 | default: "" 28 | aws-region: 29 | description: "Which AWS region to use" 30 | required: false 31 | default: "us-east-1" 32 | worker-count: 33 | description: "How many workers to use for the test" 34 | required: false 35 | default: "2" 36 | runs: 37 | using: "composite" 38 | steps: 39 | - name: Configure AWS credentials for S3 build cache 40 | if: inputs.s3-access-key-id != '' && inputs.s3-secret-access-key != '' 41 | run: | 42 | echo "PUSH_CACHE=true" >> $GITHUB_ENV 43 | shell: bash 44 | 45 | # This depends on where this file is within this repository. This walks up 46 | # from here to the transport-interop folder 47 | - run: | 48 | WORK_DIR=$(realpath "$GITHUB_ACTION_PATH/../../../transport-interop") 49 | echo "WORK_DIR=$WORK_DIR" >> $GITHUB_OUTPUT 50 | shell: bash 51 | id: find-workdir 52 | 53 | - uses: actions/setup-node@v4 54 | with: 55 | node-version: lts/* 56 | 57 | # Existence of /etc/buildkit/buildkitd.toml indicates that this is a 58 | # self-hosted runner. If so, we need to pass the config to the buildx 59 | # action. The config enables docker.io proxy which is required to 60 | # work around docker hub rate limiting. 61 | - run: | 62 | if test -f /etc/buildkit/buildkitd.toml; then 63 | echo "config=/etc/buildkit/buildkitd.toml" >> $GITHUB_OUTPUT 64 | fi 65 | shell: bash 66 | id: buildkit 67 | 68 | - name: Set up Docker Buildx 69 | id: buildx 70 | uses: docker/setup-buildx-action@v2 71 | with: 72 | config: ${{ steps.buildkit.outputs.config }} 73 | 74 | - name: Install deps 75 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 76 | run: npm ci 77 | shell: bash 78 | 79 | - name: Load cache and build 80 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 81 | env: 82 | AWS_BUCKET: ${{ inputs.s3-cache-bucket }} 83 | AWS_REGION: ${{ inputs.aws-region }} 84 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 85 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 86 | run: npm run cache -- load 87 | shell: bash 88 | 89 | - name: Assert Git tree is clean. 90 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 91 | shell: bash 92 | run: | 93 | if [[ -n "$(git status --porcelain)" ]]; then 94 | echo "Git tree is dirty. 
This means that building an impl generated something that should probably be .gitignore'd" 95 | git status 96 | exit 1 97 | fi 98 | 99 | - name: Push the image cache 100 | if: env.PUSH_CACHE == 'true' 101 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 102 | env: 103 | AWS_BUCKET: ${{ inputs.s3-cache-bucket }} 104 | AWS_REGION: ${{ inputs.aws-region }} 105 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 106 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 107 | run: npm run cache -- push 108 | shell: bash 109 | 110 | - name: Run the test 111 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 112 | env: 113 | WORKER_COUNT: ${{ inputs.worker-count }} 114 | EXTRA_VERSION: ${{ inputs.extra-versions }} 115 | NAME_FILTER: ${{ inputs.test-filter }} 116 | NAME_IGNORE: ${{ inputs.test-ignore }} 117 | run: npm run test -- --extra-version=$EXTRA_VERSION --name-filter=$NAME_FILTER --name-ignore=$NAME_IGNORE 118 | shell: bash 119 | 120 | - name: Print the results 121 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 122 | run: cat results.csv 123 | shell: bash 124 | 125 | - name: Render results 126 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 127 | run: npm run renderResults > ./dashboard.md 128 | shell: bash 129 | 130 | - name: Show Dashboard Output 131 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 132 | run: cat ./dashboard.md >> $GITHUB_STEP_SUMMARY 133 | shell: bash 134 | 135 | - name: Exit with Error 136 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 137 | run: | 138 | if grep -q ":red_circle:" ./dashboard.md; then 139 | exit 1 140 | else 141 | exit 0 142 | fi 143 | shell: bash 144 | 145 | - uses: actions/upload-artifact@v4 146 | with: 147 | name: test-plans-output 148 | path: | 149 | ${{ steps.find-workdir.outputs.WORK_DIR }}/results.csv 150 | ${{ steps.find-workdir.outputs.WORK_DIR }}/dashboard.md 151 | -------------------------------------------------------------------------------- /.github/actions/run-perf-benchmark/action.yml: -------------------------------------------------------------------------------- 1 | name: "libp2p perf benchmark" 2 | description: "Run the libp2p performance benchmark suite" 3 | inputs: 4 | test-filter: 5 | description: "Filter which tests to run; only these implementations will be run" 6 | required: false 7 | default: "all" 8 | s3-access-key-id: 9 | description: "S3 Access key id for the terraform infrastructure" 10 | required: true 11 | default: "" 12 | s3-secret-access-key: 13 | description: "S3 secret key id for the terraform infrastructure" 14 | required: true 15 | default: "" 16 | runs: 17 | using: "composite" 18 | steps: 19 | - id: ssh 20 | shell: bash 21 | name: Generate SSH key 22 | working-directory: perf 23 | run: | 24 | make ssh-keygen 25 | echo "key<<EOF" >> $GITHUB_OUTPUT 26 | while read -r line; do 27 | echo "::add-mask::$line" 28 | echo "$line" >> $GITHUB_OUTPUT 29 | done < terraform/modules/short_lived/files/perf 30 | echo "EOF" >> $GITHUB_OUTPUT 31 | 32 | - name: Configure SSH 33 | uses: webfactory/ssh-agent@d4b9b8ff72958532804b70bbe600ad43b36d5f2e # v0.8.0 34 | with: 35 | ssh-private-key: ${{ steps.ssh.outputs.key }} 36 | 37 | - name: Configure git 38 | shell: bash 39 | run: | 40 | git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com" 41 | git config --global user.name "${GITHUB_ACTOR}" 42 | 43 | - name: Configure terraform 44 | uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3 45 | 46 
| - name: Init terraform 47 | id: init 48 | shell: bash 49 | env: 50 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 51 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 52 | TF_IN_AUTOMATION: "1" 53 | TF_INPUT: "0" 54 | run: terraform init 55 | working-directory: perf/terraform/configs/local 56 | 57 | - name: Apply terraform 58 | env: 59 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 60 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 61 | TF_IN_AUTOMATION: "1" 62 | TF_INPUT: "0" 63 | shell: bash 64 | run: terraform apply -auto-approve 65 | working-directory: perf/terraform/configs/local 66 | 67 | - id: server 68 | name: Retrieve server's IP 69 | shell: bash 70 | run: terraform output -raw server_ip 71 | working-directory: perf/terraform/configs/local 72 | 73 | - id: client 74 | name: Retrieve client's IP 75 | shell: bash 76 | run: terraform output -raw client_ip 77 | working-directory: perf/terraform/configs/local 78 | 79 | - name: Download dependencies 80 | shell: bash 81 | run: npm ci 82 | working-directory: perf/runner 83 | 84 | - name: Run tests 85 | shell: bash 86 | env: 87 | SERVER_IP: ${{ steps.server.outputs.stdout }} 88 | CLIENT_IP: ${{ steps.client.outputs.stdout }} 89 | run: npm run start -- --client-public-ip $CLIENT_IP --server-public-ip $SERVER_IP --test-filter ${{ inputs.test-filter }} 90 | working-directory: perf/runner 91 | 92 | - name: Push 93 | shell: bash 94 | if: github.event.inputs.push == 'true' 95 | env: 96 | GITHUB_TOKEN: ${{ github.token }} 97 | run: | 98 | git add benchmark-results.json 99 | git commit -m "perf: update benchmark results" 100 | git push 101 | gh pr comment --body "See new metrics at https://observablehq.com/@libp2p-workspace/performance-dashboard?branch=$(git rev-parse HEAD)" || true 102 | working-directory: perf/runner 103 | 104 | - name: Archive 105 | if: github.event.inputs.push == 'false' 106 | uses: actions/upload-artifact@v4 107 | with: 108 | name: benchmark-results 109 | path: perf/runner/benchmark-results.json 110 | 111 | - name: Destroy terraform 112 | shell: bash 113 | if: always() && steps.init.outputs.exitcode == 0 114 | env: 115 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 116 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 117 | TF_IN_AUTOMATION: "1" 118 | TF_INPUT: "0" 119 | run: terraform destroy -auto-approve 120 | working-directory: perf/terraform/configs/local 121 | -------------------------------------------------------------------------------- /.github/actions/run-transport-interop-test/action.yml: -------------------------------------------------------------------------------- 1 | name: "libp2p transport interop test" 2 | description: "Run the libp2p transport interoperability test suite" 3 | inputs: 4 | test-filter: 5 | description: "Filter which tests to run out of the created matrix" 6 | required: false 7 | default: "" 8 | upload-results: 9 | description: "Upload the test results as an artifact" 10 | required: false 11 | default: "true" 12 | test-results-suffix: 13 | description: "Suffix to add to the test results artifact name" 14 | required: false 15 | default: "" 16 | test-ignore: 17 | description: "Exclude tests from the created matrix that include this string in their name" 18 | required: false 19 | default: "" 20 | extra-versions: 21 | description: "Space-separated paths to JSON files describing additional images" 22 | required: false 23 | default: "" 24 | s3-cache-bucket: 25 | description: "Which S3 bucket to use for container layer caching" 26 | required: false 27 
| default: "" 28 | s3-access-key-id: 29 | description: "S3 Access key id for the cache" 30 | required: false 31 | default: "" 32 | s3-secret-access-key: 33 | description: "S3 secret key id for the cache" 34 | required: false 35 | default: "" 36 | aws-region: 37 | description: "Which AWS region to use" 38 | required: false 39 | default: "us-east-1" 40 | worker-count: 41 | description: "How many workers to use for the test" 42 | required: false 43 | default: "2" 44 | timeout: 45 | description: "How many seconds to let each test run for" 46 | required: false 47 | verbose: 48 | description: "Enable verbose output" 49 | required: false 50 | default: false 51 | runs: 52 | using: "composite" 53 | steps: 54 | - name: Configure AWS credentials for S3 build cache 55 | if: inputs.s3-access-key-id != '' && inputs.s3-secret-access-key != '' 56 | run: | 57 | echo "PUSH_CACHE=true" >> $GITHUB_ENV 58 | shell: bash 59 | 60 | # This depends on where this file is within this repository. This walks up 61 | # from here to the transport-interop folder 62 | - run: | 63 | WORK_DIR=$(realpath "$GITHUB_ACTION_PATH/../../../transport-interop") 64 | echo "WORK_DIR=$WORK_DIR" >> $GITHUB_OUTPUT 65 | shell: bash 66 | id: find-workdir 67 | 68 | - uses: actions/setup-node@v4 69 | with: 70 | node-version: lts/* 71 | 72 | # Existence of /etc/buildkit/buildkitd.toml indicates that this is a 73 | # self-hosted runner. If so, we need to pass the config to the buildx 74 | # action. The config enables docker.io proxy which is required to 75 | # work around docker hub rate limiting. 76 | - run: | 77 | if test -f /etc/buildkit/buildkitd.toml; then 78 | echo "config=/etc/buildkit/buildkitd.toml" >> $GITHUB_OUTPUT 79 | fi 80 | shell: bash 81 | id: buildkit 82 | 83 | - name: Set up Docker Buildx 84 | id: buildx 85 | uses: docker/setup-buildx-action@v2 86 | with: 87 | config: ${{ steps.buildkit.outputs.config }} 88 | 89 | - name: Install deps 90 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 91 | run: npm ci 92 | shell: bash 93 | 94 | - name: Load cache and build 95 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 96 | env: 97 | AWS_BUCKET: ${{ inputs.s3-cache-bucket }} 98 | AWS_REGION: ${{ inputs.aws-region }} 99 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 100 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 101 | run: npm run cache -- load 102 | shell: bash 103 | 104 | - name: Assert Git tree is clean. 105 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 106 | shell: bash 107 | run: | 108 | if [[ -n "$(git status --porcelain)" ]]; then 109 | echo "Git tree is dirty. 
This means that building an impl generated something that should probably be .gitignore'd" 110 | git status 111 | exit 1 112 | fi 113 | 114 | - name: Push the image cache 115 | if: env.PUSH_CACHE == 'true' 116 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 117 | env: 118 | AWS_BUCKET: ${{ inputs.s3-cache-bucket }} 119 | AWS_REGION: ${{ inputs.aws-region }} 120 | AWS_ACCESS_KEY_ID: ${{ inputs.s3-access-key-id }} 121 | AWS_SECRET_ACCESS_KEY: ${{ inputs.s3-secret-access-key }} 122 | run: npm run cache -- push 123 | shell: bash 124 | 125 | - name: Run the test 126 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 127 | env: 128 | WORKER_COUNT: ${{ inputs.worker-count }} 129 | EXTRA_VERSION: ${{ inputs.extra-versions }} 130 | NAME_FILTER: ${{ inputs.test-filter }} 131 | NAME_IGNORE: ${{ inputs.test-ignore }} 132 | TIMEOUT: ${{ inputs.timeout }} 133 | VERBOSE: ${{ inputs.verbose }} 134 | run: npm run test -- --extra-version=$EXTRA_VERSION --name-filter="$NAME_FILTER" --name-ignore="$NAME_IGNORE" --verbose="$VERBOSE" 135 | shell: bash 136 | 137 | - name: Print the results 138 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 139 | run: cat results.csv 140 | shell: bash 141 | 142 | - name: Render results 143 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 144 | run: npm run renderResults > ./dashboard.md 145 | shell: bash 146 | 147 | - name: Show Dashboard Output 148 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 149 | run: cat ./dashboard.md >> $GITHUB_STEP_SUMMARY 150 | shell: bash 151 | 152 | - name: Exit with Error 153 | working-directory: ${{ steps.find-workdir.outputs.WORK_DIR }} 154 | run: | 155 | if grep -q ":red_circle:" ./dashboard.md; then 156 | exit 1 157 | else 158 | exit 0 159 | fi 160 | shell: bash 161 | - name: Upload test results 162 | if: ${{ inputs.upload-results == 'true' }} 163 | uses: actions/upload-artifact@v4 164 | with: 165 | name: ${{ inputs.test-results-suffix && format('test-plans-output-{0}', inputs.test-results-suffix) || 'test-plans-output' }} 166 | path: | 167 | ${{ steps.find-workdir.outputs.WORK_DIR }}/results.csv 168 | ${{ steps.find-workdir.outputs.WORK_DIR }}/dashboard.md 169 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: npm 4 | # this should point to the latest js-libp2p major so that deps are 5 | # updated automatically after a release 6 | directory: "/transport-interop/impl/js/v2.x" 7 | schedule: 8 | interval: daily 9 | time: "10:00" 10 | open-pull-requests-limit: 20 11 | commit-message: 12 | prefix: "deps" 13 | prefix-development: "deps(dev)" 14 | groups: 15 | libp2p-deps: # update all deps together 16 | patterns: 17 | - "*" 18 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/hole-punch-interop.yml: 
-------------------------------------------------------------------------------- 1 | on: 2 | workflow_dispatch: 3 | pull_request: 4 | paths: 5 | - 'hole-punch-interop/**' 6 | push: 7 | branches: 8 | - "master" 9 | paths: 10 | - 'hole-punch-interop/**' 11 | 12 | name: libp2p holepunching interop test 13 | 14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.ref }} 16 | cancel-in-progress: true 17 | 18 | jobs: 19 | run-hole-punch-interop: 20 | runs-on: ['self-hosted', 'linux', 'x64', '4xlarge'] # https://github.com/pl-strflt/tf-aws-gh-runner/blob/main/runners.tf 21 | # Uncomment to test for flakiness. 22 | # strategy: 23 | # matrix: 24 | # dim1: ['a', 'b', 'c', 'd', 'e'] 25 | # dim2: [1, 2, 3, 4, 5] 26 | # fail-fast: false 27 | steps: 28 | - uses: actions/checkout@v3 29 | - uses: ./.github/actions/run-interop-hole-punch-test 30 | with: 31 | s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} 32 | s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} 33 | s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} 34 | worker-count: 16 35 | -------------------------------------------------------------------------------- /.github/workflows/perf.yml: -------------------------------------------------------------------------------- 1 | name: libp2p perf test 2 | 3 | # How to configure a repository for running this workflow: 4 | # 1. Configure auth for the AWS provider as per https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration 5 | # 2. Run 'terraform init' and 'terraform apply' in 'perf/terraform/configs/remote' to create the resources needed for this workflow 6 | # 3. Go to https://console.aws.amazon.com/iamv2/home?#/users/details/perf?section=security_credentials 7 | # 4. Click 'Create access key' to get the access key ID and secret access key 8 | # 5. Go to https://github.com/libp2p/test-plans/settings/secrets/actions 9 | # 6. Click 'New repository secret', set the name to 'PERF_AWS_SECRET_ACCESS_KEY', and paste the secret access key from step 4 10 | # 7. Go to https://github.com/libp2p/test-plans/settings/variables/actions 11 | # 8. Click 'New repository variable', set the name to 'PERF_AWS_ACCESS_KEY_ID', and paste the access key ID from step 4 12 | 13 | on: 14 | workflow_dispatch: 15 | inputs: 16 | push: 17 | description: "Push the benchmark results to the repository" 18 | required: false 19 | default: "true" 20 | 21 | jobs: 22 | perf: 23 | name: Perf 24 | runs-on: ubuntu-latest 25 | timeout-minutes: 360 # 6 hours is the maximum job execution time 26 | defaults: 27 | run: 28 | shell: bash 29 | working-directory: perf 30 | steps: 31 | - name: Checkout test-plans 32 | uses: actions/checkout@v3 33 | with: 34 | repository: ${{ github.repository }} 35 | ref: ${{ github.ref }} 36 | - uses: ./.github/actions/run-perf-benchmark 37 | with: 38 | s3-access-key-id: ${{ vars.PERF_AWS_ACCESS_KEY_ID }} 39 | s3-secret-access-key: ${{ secrets.PERF_AWS_SECRET_ACCESS_KEY }} 40 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/transport-interop.yml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_dispatch: 3 | pull_request: 4 | paths: 5 | - "transport-interop/**" 6 | - ".github/actions/run-transport-interop-test/action.yml" 7 | - ".github/workflows/transport-interop.yml" 8 | push: 9 | branches: 10 | - "master" 11 | paths: 12 | - "transport-interop/**" 13 | 14 | name: libp2p transport interop test 15 | 16 | jobs: 17 | run-transport-interop: 18 | runs-on: ["self-hosted", "linux", "x64", "4xlarge"] # https://github.com/pl-strflt/tf-aws-gh-runner/blob/main/runners.tf 19 | steps: 20 | - uses: actions/checkout@v3 21 | - uses: ./.github/actions/run-transport-interop-test 22 | with: 23 | s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }} 24 | s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }} 25 | s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }} 26 | worker-count: 16 27 | test-ignore: "java-v0.9 x zig-v0.0.1 (quic-v1)|zig-v0.0.1 x java-v0.9 (quic-v1)" 28 | build-without-secrets: 29 | runs-on: ["self-hosted", "linux", "x64", "4xlarge"] # https://github.com/pl-strflt/tf-aws-gh-runner/blob/main/runners.tf 30 | steps: 31 | - uses: actions/checkout@v3 32 | # Purposely not using secrets to replicate how forks will behave. 33 | - uses: ./.github/actions/run-transport-interop-test 34 | with: 35 | # It's okay to not run the tests, we only care to check if the tests build without cache. 
36 | upload-results: false 37 | test-filter: '"no test matches this, skip all"' 38 | -------------------------------------------------------------------------------- /.github/workflows/update-badge.yml: -------------------------------------------------------------------------------- 1 | name: Update Badge 2 | 3 | on: 4 | workflow_run: 5 | workflows: 6 | - libp2p transport interop test 7 | types: 8 | - completed 9 | branches: 10 | - master 11 | 12 | defaults: 13 | run: 14 | shell: bash 15 | 16 | concurrency: 17 | group: ${{ github.workflow }} 18 | cancel-in-progress: true 19 | 20 | env: 21 | BADGE_NAME: Interop Dashboard 22 | 23 | jobs: 24 | update-badge: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - id: workflow 28 | env: 29 | WORKFLOW_PATH: ${{ github.event.workflow.path }} 30 | run: echo "name=${WORKFLOW_PATH#.github/workflows/}" >> $GITHUB_OUTPUT 31 | - uses: pl-strflt/job-summary-url-action@v1 32 | id: metadata 33 | with: 34 | workflow: ${{ steps.workflow.outputs.name }} 35 | run_id: ${{ github.event.workflow_run.id }} 36 | run_attempt: ${{ github.event.workflow_run.run_attempt }} 37 | job: latest 38 | - uses: actions/checkout@v3 39 | - id: update 40 | uses: actions/github-script@v6 41 | env: 42 | BADGE_URL: ${{ github.event.workflow.badge_url }} 43 | SUMMARY_URL: ${{ steps.metadata.outputs.job_summary_url }} 44 | with: 45 | script: | 46 | const fs = require('fs') 47 | 48 | const badgeName = process.env.BADGE_NAME 49 | const badgeURL = process.env.BADGE_URL 50 | const refName = process.env.GITHUB_REF_NAME 51 | const summaryURL = process.env.SUMMARY_URL 52 | 53 | const searchValue = new RegExp(`\\[!\\[${badgeName}\\]\\(.*\\)\\]\\(.*\\)`, 'g') 54 | const replaceValue = `[![${badgeName}](${badgeURL}?branch=${refName})](${summaryURL})` 55 | 56 | console.log(`Searching for: ${searchValue}`) 57 | console.log(`To replace it with: ${replaceValue}`) 58 | 59 | const readme = fs.readFileSync('README.md').toString() 60 | const updatedReadme = readme.replace(searchValue, replaceValue) 61 | 62 | if (readme !== updatedReadme) { 63 | console.log('Updating README') 64 | fs.writeFileSync('README.md', updatedReadme) 65 | return true 66 | } else { 67 | console.log('README does not need to be updated') 68 | return false 69 | } 70 | # https://github.com/orgs/community/discussions/26560 71 | - if: steps.update.outputs.result == 'true' 72 | run: | 73 | git config user.email "41898282+github-actions[bot]@users.noreply.github.com" 74 | git config user.name "github-actions[bot]" 75 | - if: steps.update.outputs.result == 'true' 76 | run: | 77 | git add README.md 78 | git commit -m 'chore: update the link to the interop dashboard [skip ci]' 79 | git push 80 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # ignore transient paths for pubsub tests 3 | pubsub/venv/ 4 | pubsub/scripts/output/ 5 | pubsub/scripts/config-snapshot.json 6 | pubsub/scripts/configs/snapshot.json 7 | __pycache__/ 8 | .ipynb_checkpoints/ 9 | 10 | # Created by https://www.gitignore.io/api/go,visualstudiocode 11 | # Edit at https://www.gitignore.io/?templates=go,visualstudiocode 12 | 13 | ### Go ### 14 | # Binaries for programs and plugins 15 | *.exe 16 | *.exe~ 17 | *.dll 18 | *.so 19 | *.dylib 20 | 21 | # Test binary, built with `go test -c` 22 | *.test 23 | 24 | # Output of the go coverage tool, specifically when used with LiteIDE 25 | *.out 26 | 27 | # Dependency directories (remove the comment below to 
include it) 28 | # vendor/ 29 | 30 | ### Go Patch ### 31 | /vendor/ 32 | /Godeps/ 33 | 34 | ### VisualStudioCode ### 35 | .vscode/* 36 | 37 | ### VisualStudioCode Patch ### 38 | # Ignore all local history of files 39 | .history 40 | 41 | # End of https://www.gitignore.io/api/go,visualstudiocode 42 | 43 | ### NodeJS 44 | 45 | node_modules 46 | dist 47 | 48 | # ignore system files 49 | .DS_Store 50 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 2 | 3 | http://www.apache.org/licenses/LICENSE-2.0 4 | 5 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 6 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 
20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Interoperability/end-to-end test-plans & performance benchmarking for libp2p 2 | 3 | [![Interop Dashboard](https://github.com/libp2p/test-plans/workflows/libp2p%20transport%20interop%20test/badge.svg?branch=master)](https://github.com/libp2p/test-plans/actions/runs/14766259564/attempts/1#summary-41458208947) 4 | 5 | [![Made by Protocol Labs](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://protocol.ai) 6 | 7 | This repository contains: 8 | * interoperability tests for libp2p's transport-layer modules across different implementations and versions 9 | * components to run performance benchmarks for different libp2p implementations 10 | 11 | ## Transport Interop 12 | ### Specs 13 | 14 | Please see our first specification for interoperability tests between transports, multiplexers, and secure channels here: [Transport Interoperability Test Specs](transport-interop/README.md) 15 | 16 | More specs to come soon! 17 | 18 | ## History 19 | 20 | These test-plans historically used Testground. To read why we're now using `docker compose` instead, please see: [Why we're moving away from Testground](https://github.com/libp2p/test-plans/issues/103) 21 | 22 | ## Performance Benchmarking 23 | 24 | Please see the [benchmarking README](./perf#libp2p-performance-benchmarking). 25 | 26 | ## Roadmap 27 | 28 | Our roadmap for test-plans can be found here: https://github.com/libp2p/test-plans/blob/master/ROADMAP.md 29 | 30 | It represents the current projects the test-plans maintainers are focused on and provides an estimation of completion targets. 31 | It is complementary to those of [go-libp2p](https://github.com/libp2p/go-libp2p/blob/master/ROADMAP.md), [rust-libp2p](https://github.com/libp2p/rust-libp2p/blob/master/ROADMAP.md), [js-libp2p](https://github.com/libp2p/js-libp2p/blob/master/ROADMAP.md), and the [overarching libp2p project roadmap](https://github.com/libp2p/specs/blob/master/ROADMAP.md). 32 | 33 | ## License 34 | 35 | Dual-licensed: [MIT](./LICENSE-MIT), [Apache Software License v2](./LICENSE-APACHE), by way of the 36 | [Permissive License Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/). 37 | -------------------------------------------------------------------------------- /funding.json: -------------------------------------------------------------------------------- 1 | { 2 | "opRetro": { 3 | "projectId": "0x966804cb492e1a4bde5d781a676a44a23d69aa5dd2562fa7a4f95bb606021c8b" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /hole-punch-interop/.gitignore: -------------------------------------------------------------------------------- 1 | # For now, not committing image.json files 2 | image.json 3 | 4 | results.csv 5 | runs/ 6 | 7 | node_modules/ 8 | -------------------------------------------------------------------------------- /hole-punch-interop/Makefile: -------------------------------------------------------------------------------- 1 | RUST_SUBDIRS := $(wildcard impl/rust/*/.) 2 | GO_SUBDIRS := $(wildcard impl/go/*/.) 
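# The trailing "/." in the wildcard patterns above restricts the matches to directories.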
3 | 4 | all: rust-relay router $(RUST_SUBDIRS) $(GO_SUBDIRS) 5 | rust-relay: 6 | $(MAKE) -C rust-relay 7 | router: 8 | $(MAKE) -C router 9 | $(RUST_SUBDIRS): 10 | $(MAKE) -C $@ 11 | $(GO_SUBDIRS): 12 | $(MAKE) -C $@ 13 | clean: 14 | $(MAKE) -C rust-relay clean 15 | $(MAKE) -C router clean 16 | $(MAKE) -C $(RUST_SUBDIRS) clean 17 | $(MAKE) -C $(GO_SUBDIRS) clean 18 | 19 | .PHONY: rust-relay router all $(RUST_SUBDIRS) $(GO_SUBDIRS) 20 | -------------------------------------------------------------------------------- /hole-punch-interop/README.md: -------------------------------------------------------------------------------- 1 | # Hole punch tests 2 | 3 | ## How to run locally 4 | 5 | 1. `npm install` 6 | 2. `make` 7 | 3. `npm run test` 8 | 9 | ## Client configuration 10 | 11 | | env variable | possible values | 12 | |--------------|-----------------| 13 | | MODE | listen \| dial | 14 | | TRANSPORT | tcp \| quic | 15 | 16 | - For TCP, the client MUST use noise + yamux to upgrade the connection. 17 | - The relayed connection MUST use noise + yamux. 18 | 19 | ## Test flow 20 | 21 | 1. The relay starts and pushes its address to the following redis keys: 22 | - `RELAY_TCP_ADDRESS` for the TCP test 23 | - `RELAY_QUIC_ADDRESS` for the QUIC test 24 | 1. Upon start-up, clients connect to a redis server at `redis:6379` and block until the relevant redis key becomes available. 25 | They then dial the relay on the provided address. 26 | 1. The relay supports identify. 27 | Implementations SHOULD use that to figure out their external address next. 28 | 1. Once connected to the relay, a client in `MODE=listen` should listen on the relay and make a reservation. 29 | Once the reservation is made, it pushes its `PeerId` to the redis key `LISTEN_CLIENT_PEER_ID`. 30 | 1. A client in `MODE=dial` blocks on the availability of `LISTEN_CLIENT_PEER_ID`. 31 | Once available, it dials the listener through the relay at `<relay-address>/p2p-circuit/<listener-peer-id>`. 32 | 1. Upon a successful hole-punch, the peer in `MODE=dial` measures the RTT across the newly established connection. 33 | 1. The RTT MUST be printed to stdout in the following format: 34 | ```json 35 | { "rtt_to_holepunched_peer_millis": 12 } 36 | ``` 37 | 1. Once printed, the dialer MUST exit with `0` (an illustrative sketch of this dialer-side flow appears in the design notes below). 38 | 39 | ## Requirements for implementations 40 | 41 | - Docker containers MUST have a binary called `hole-punch-client` in their $PATH 42 | - MUST have `dig`, `curl`, `jq` and `tcpdump` installed 43 | - Listener MUST NOT early-exit but wait to be killed by the test runner 44 | - Logs MUST go to stderr, RTT JSON MUST go to stdout 45 | - Dialer and listener both MUST use 0RTT negotiation for protocols 46 | - Implementations SHOULD disable timeouts on the redis client, i.e. use `0` 47 | - Implementations SHOULD exit early with a non-zero exit code if anything goes wrong 48 | - Implementations MUST set `TCP_NODELAY` for the TCP transport 49 | - Implementations MUST make sure connections are being kept alive 50 | 51 | ## Design notes 52 | 53 | The design of this test runner is heavily influenced by [transport-interop](../transport-interop) (formerly multidim-interop) but differs in several ways. 54 | 55 | All files related to test runs will be written to the [./runs](./runs) directory. 56 | This includes the `docker-compose.yml` files of each individual run as well as logs and `tcpdump`'s for the dialer and listener.
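To make the client contract above concrete, the dialer side of the test flow can be sketched as follows. This is a minimal, illustrative sketch only: the real clients are per-implementation binaries (e.g. the Rust image built from `impl/rust/v0.53`), and the `redis` npm package (node-redis v4) as well as the `dialThroughRelay`/`measureRtt` helpers are assumptions made for the example.

```typescript
// Illustrative dialer sketch of the redis handshake described in "Test flow".
import { createClient } from 'redis';

// Hypothetical stand-ins for the libp2p implementation under test:
declare function dialThroughRelay(relayAddr: string, peerId: string): Promise<unknown>;
declare function measureRtt(connection: unknown): Promise<number>;

async function runDialer(transport: 'tcp' | 'quic'): Promise<void> {
  const redis = createClient({ url: 'redis://redis:6379' });
  await redis.connect();

  // The relay pushes its address once per client; BLPOP with timeout 0 blocks
  // until our copy is available (implementations SHOULD disable client-side
  // timeouts, per the requirements above).
  const relayKey = transport === 'tcp' ? 'RELAY_TCP_ADDRESS' : 'RELAY_QUIC_ADDRESS';
  const relayAddr = (await redis.blPop(relayKey, 0))!.element;

  // Block until the listening client has made its relay reservation.
  const listenerPeerId = (await redis.blPop('LISTEN_CLIENT_PEER_ID', 0))!.element;

  const connection = await dialThroughRelay(relayAddr, listenerPeerId);
  const rttMillis = await measureRtt(connection);

  // Logs go to stderr; only the result JSON goes to stdout.
  console.error('hole punch succeeded');
  console.log(JSON.stringify({ rtt_to_holepunched_peer_millis: rttMillis }));
  process.exit(0);
}
```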
57 | 58 | The docker-compose file uses 6 containers in total: 59 | 60 | - 1 redis container for orchestrating the test 61 | - 1 [relay](./rust-relay) 62 | - 1 hole-punch client in `MODE=dial` 63 | - 1 hole-punch client in `MODE=listen` 64 | - 2 [routers](./router): 1 per client 65 | 66 | The networks are allocated by docker-compose. 67 | We dynamically fetch the IPs and subnets as part of a startup script to set the correct IP routes. 68 | 69 | In total, we have three networks: 70 | 71 | 1. `lan_dialer` 72 | 2. `lan_listener` 73 | 3. `internet` 74 | 75 | The two LANs host a router and a client each, whereas the relay is connected (without a router) to the `internet` network. 76 | On startup of the clients, we add an `ip route` that redirects all traffic to the corresponding `router` container. 77 | The router container masquerades all traffic upon forwarding; see the [README](./router/README.md) for details. 78 | 79 | ## Running a single test 80 | 81 | 1. Build all containers using `make` 82 | 1. Generate all test definitions using `npm run test -- --no-run` 83 | 1. Pick the desired test from the [runs](./runs) directory 84 | 1. Execute it using `docker compose up` 85 | -------------------------------------------------------------------------------- /hole-punch-interop/dockerBuildWrapper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CACHING_OPTIONS="" 4 | # If in CI and we have a defined cache bucket, use caching 5 | if [[ -n "${CI}" ]] && [[ -n "${AWS_BUCKET}" ]]; then 6 | CACHING_OPTIONS="\ 7 | --cache-to type=s3,mode=max,bucket=$AWS_BUCKET,region=$AWS_REGION,prefix=buildCache,name=$IMAGE_NAME \ 8 | --cache-from type=s3,mode=max,bucket=$AWS_BUCKET,region=$AWS_REGION,prefix=buildCache,name=$IMAGE_NAME" 9 | fi 10 | 11 | docker buildx build \ 12 | --load \ 13 | -t $IMAGE_NAME $CACHING_OPTIONS "$@" 14 | -------------------------------------------------------------------------------- /hole-punch-interop/helpers/cache.ts: -------------------------------------------------------------------------------- 1 | const AWS_BUCKET = process.env.AWS_BUCKET; 2 | const scriptDir = __dirname; 3 | 4 | import * as crypto from 'crypto'; 5 | import * as fs from 'fs'; 6 | import * as os from 'os'; 7 | import * as path from 'path'; 8 | import * as child_process from 'child_process'; 9 | import ignore, { Ignore } from 'ignore' 10 | 11 | const holePunchInteropDir = path.join(scriptDir, '..') 12 | const arch = child_process.execSync('docker info -f "{{.Architecture}}"').toString().trim(); 13 | 14 | enum Mode { 15 | LoadCache = 1, 16 | PushCache, 17 | } 18 | const modeStr = process.argv[2]; 19 | let mode: Mode 20 | switch (modeStr) { 21 | case "push": 22 | mode = Mode.PushCache 23 | break 24 | case "load": 25 | mode = Mode.LoadCache 26 | break 27 | default: 28 | throw new Error(`Unknown mode: ${modeStr}`) 29 | } 30 | 31 | (async () => { 32 | for (const implFamily of fs.readdirSync(path.join(holePunchInteropDir, 'impl'))) { 33 | const ig = ignore() 34 | 35 | addGitignoreIfPresent(ig, path.join(holePunchInteropDir, ".gitignore")) 36 | addGitignoreIfPresent(ig, path.join(holePunchInteropDir, "..", ".gitignore")) 37 | 38 | const implFamilyDir = path.join(holePunchInteropDir, 'impl', implFamily) 39 | 40 | addGitignoreIfPresent(ig, path.join(implFamilyDir, ".gitignore")) 41 | 42 | for (const impl of fs.readdirSync(implFamilyDir)) { 43 | const implFolder = fs.realpathSync(path.join(implFamilyDir, impl)); 44 | 45 | if 
(!fs.statSync(implFolder).isDirectory()) { 46 | continue; 47 | } 48 | 49 | await loadCacheOrBuild(implFolder, ig); 50 | } 51 | 52 | await loadCacheOrBuild("router", ig); 53 | await loadCacheOrBuild("rust-relay", ig); 54 | } 55 | })() 56 | 57 | async function loadCacheOrBuild(dir: string, ig: Ignore) { 58 | addGitignoreIfPresent(ig, path.join(dir, ".gitignore")) 59 | 60 | // Get all the files in the dir: 61 | let files = walkDir(dir) 62 | // Turn them into relative paths: 63 | files = files.map(f => f.replace(dir + "/", "")) 64 | // Ignore files that are in the .gitignore: 65 | files = files.filter(ig.createFilter()) 66 | // Sort them to be deterministic 67 | files = files.sort() 68 | 69 | console.log(dir) 70 | console.log("Files:", files) 71 | 72 | // Turn them back into absolute paths: 73 | files = files.map(f => path.join(dir, f)) 74 | const cacheKey = await hashFiles(files) 75 | console.log("Cache key:", cacheKey) 76 | 77 | if (mode == Mode.PushCache) { 78 | console.log("Pushing cache") 79 | try { 80 | if (!AWS_BUCKET) { 81 | throw new Error("AWS_BUCKET not set") 82 | } 83 | try { 84 | child_process.execSync(`aws s3 ls s3://${AWS_BUCKET}/imageCache/${cacheKey}-${arch}.tar.gz`) 85 | console.log("Cache already exists") 86 | } catch (e) { 87 | console.log("Cache doesn't exist", e) 88 | // Read image id from image.json 89 | const imageID = JSON.parse(fs.readFileSync(path.join(dir, 'image.json')).toString()).imageID; 90 | console.log(`Pushing cache for ${dir}: ${imageID}`) 91 | child_process.execSync(`docker image save ${imageID} | gzip | aws s3 cp - s3://${AWS_BUCKET}/imageCache/${cacheKey}-${arch}.tar.gz`); 92 | } 93 | } catch (e) { 94 | console.log("Failed to push image cache:", e) 95 | } 96 | } else if (mode == Mode.LoadCache) { 97 | if (fs.existsSync(path.join(dir, 'image.json'))) { 98 | console.log("Already built") 99 | return; 100 | } 101 | console.log("Loading cache") 102 | let cacheHit = false 103 | try { 104 | if (!AWS_BUCKET) { 105 | throw new Error("AWS_BUCKET not set") 106 | } 107 | const cachePath = fs.mkdtempSync(path.join(os.tmpdir(), 'cache')) 108 | const archivePath = path.join(cachePath, 'archive.tar.gz') 109 | const dockerLoadedMsg = child_process.execSync(`aws s3 cp s3://${AWS_BUCKET}/imageCache/${cacheKey}-${arch}.tar.gz ${archivePath} && docker image load -i ${archivePath}`).toString(); 110 | const loadedImageId = dockerLoadedMsg.match(/Loaded image( ID)?: (.*)/)[2]; 111 | if (loadedImageId) { 112 | console.log(`Cache hit for ${loadedImageId}`); 113 | fs.writeFileSync(path.join(dir, 'image.json'), JSON.stringify({imageID: loadedImageId}) + "\n"); 114 | cacheHit = true 115 | } 116 | } catch (e) { 117 | console.log("Cache not found:", e) 118 | } 119 | 120 | if (cacheHit) { 121 | console.log("Building any remaining things from image.json") 122 | // We're building using -o image.json. This tells make to 123 | // not bother building image.json or anything it depends on. 124 | child_process.execSync(`make -o image.json`, {cwd: dir, stdio: 'inherit'}) 125 | } else { 126 | console.log("No cache, building from scratch") 127 | child_process.execSync(`make`, {cwd: dir, stdio: "inherit"}) 128 | } 129 | } 130 | } 131 | 132 | function walkDir(dir: string) { 133 | let results = []; 134 | fs.readdirSync(dir).forEach(f => { 135 | let dirPath = path.join(dir, f); 136 | let isDirectory = fs.statSync(dirPath).isDirectory(); 137 | results = isDirectory ? 
results.concat(walkDir(dirPath)) : results.concat(path.join(dir, f)); 138 | }); 139 | return results; 140 | }; 141 | 142 | async function hashFiles(files: string[]): Promise<string> { 143 | const fileHashes = await Promise.all( 144 | files.map(async (file) => { 145 | const data = await fs.promises.readFile(file); 146 | return crypto.createHash('sha256').update(data).digest('hex'); 147 | }) 148 | ); 149 | return crypto.createHash('sha256').update(fileHashes.join('')).digest('hex'); 150 | } 151 | 152 | function addGitignoreIfPresent(ig: Ignore, pathStr: string): boolean { 153 | try { 154 | if (fs.statSync(pathStr).isFile()) { 155 | ig.add(fs.readFileSync(pathStr).toString()) 156 | } 157 | return true 158 | } catch { 159 | return false 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /hole-punch-interop/impl/rust/.gitignore: -------------------------------------------------------------------------------- 1 | rust-libp2p-*.zip 2 | rust-libp2p-* 3 | rust-libp2p-*/* 4 | image.json 5 | -------------------------------------------------------------------------------- /hole-punch-interop/impl/rust/v0.53/Makefile: -------------------------------------------------------------------------------- 1 | image_name := rust-v0.53 2 | commitSha := 7f4ba690e87a867403f6266d8ee7d7db5e7a15bc 3 | 4 | all: image.json 5 | 6 | image.json: rust-libp2p-${commitSha} 7 | cd rust-libp2p-${commitSha} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f hole-punching-tests/Dockerfile . 8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip 12 | unzip -o rust-libp2p-${commitSha}.zip 13 | 14 | rust-libp2p-${commitSha}.zip: 15 | wget -O $@ "https://github.com/libp2p/rust-libp2p/archive/${commitSha}.zip" 16 | 17 | clean: 18 | rm image.json 19 | rm rust-libp2p-*.zip 20 | rm -rf rust-libp2p-* 21 | -------------------------------------------------------------------------------- /hole-punch-interop/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "libp2p hole-punch test", 3 | "version": "0.0.1", 4 | "description": "Tests hole-punching across libp2p implementations", 5 | "main": "testplans.ts", 6 | "scripts": { 7 | "test": "ts-node src/*.test.ts && ts-node testplans.ts", 8 | "renderResults": "ts-node renderResults.ts", 9 | "cache": "ts-node helpers/cache.ts" 10 | }, 11 | "author": "marcopolo", 12 | "license": "MIT", 13 | "devDependencies": { 14 | "ts-node": "^10.9.1", 15 | "typescript": "^4.9.3" 16 | }, 17 | "dependencies": { 18 | "@types/yargs": "^17.0.19", 19 | "csv-parse": "^5.3.3", 20 | "csv-stringify": "^6.2.3", 21 | "ignore": "^5.2.4", 22 | "json-schema-to-typescript": "^11.0.2", 23 | "sqlite": "^4.1.2", 24 | "sqlite3": "^5.1.2", 25 | "yaml": "^2.2.1", 26 | "yargs": "^17.6.2" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /hole-punch-interop/renderResults.ts: -------------------------------------------------------------------------------- 1 | import { generateTable, load, markdownTable } from './src/lib' 2 | 3 | // Read results.csv 4 | export async function render() { 5 | const runs = load("results.csv") 6 | 7 | const regex = /(?<implA>.+) x (?<implB>.+) \((?<options>.*)\)/ 8 | const parsedRuns = runs.map(run => { 9 | const match = run.name.match(regex) 10 | if (!match || match.groups === undefined) { 11 | throw new Error(`Run ID ${run.name} does not match the 
expected format`); 12 | } 13 | return { 14 | ...run, 15 | implA: match.groups.implA, 16 | implB: match.groups.implB, 17 | options: match.groups.options.split(",").map(option => option.replace("_", " ").trim()), 18 | } 19 | }) 20 | 21 | // Group by options 22 | const runsByOptions = parsedRuns.reduce((acc: { [key: string]: any }, run) => { 23 | acc[JSON.stringify(run.options)] = [...acc[JSON.stringify(run.options)] || [], run] 24 | return acc 25 | }, {}) 26 | 27 | let outMd = "" 28 | 29 | for (const runGroup of Object.values(runsByOptions)) { 30 | outMd += `## Using: ${runGroup[0].options.join(", ")}\n` 31 | const table = generateTable(runGroup) 32 | outMd += markdownTable(table) 33 | outMd += "\n\n" 34 | } 35 | 36 | console.log(outMd) 37 | 38 | } 39 | 40 | render() 41 | 42 | -------------------------------------------------------------------------------- /hole-punch-interop/router/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:12-slim 2 | 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get -y install iproute2 nftables jq tcpdump 5 | 6 | COPY *.sh /scripts/ 7 | RUN chmod +x /scripts/*.sh 8 | 9 | HEALTHCHECK CMD [ "sh", "-c", "test $(cat /tmp/setup_done) = 1" ] 10 | 11 | ENTRYPOINT ["./scripts/run.sh"] 12 | -------------------------------------------------------------------------------- /hole-punch-interop/router/Makefile: -------------------------------------------------------------------------------- 1 | image_name := hole-punch-test-router 2 | 3 | all: image.json 4 | 5 | image.json: Dockerfile run.sh 6 | IMAGE_NAME=${image_name} ../dockerBuildWrapper.sh -f Dockerfile . 7 | docker image inspect ${image_name} -f "{{.Id}}" | \ 8 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 9 | clean: 10 | rm image.json 11 | -------------------------------------------------------------------------------- /hole-punch-interop/router/README.md: -------------------------------------------------------------------------------- 1 | # Router 2 | 3 | This directory contains a Debian-based router implemented on top of nftables. 4 | 5 | It expects to be run with two network interfaces: 6 | 7 | - `eth0`: The "external" interface. 8 | - `eth1`: The "internal" interface. 9 | 10 | The order of these is important. 11 | The router cannot possibly know which one is which and thus assumes that `eth0` is the external one and `eth1` the internal one. 12 | The firewall is set up to take incoming traffic on `eth1` and forward + masquerade it to `eth0`. 13 | 14 | It also expects an env variable `DELAY_MS` to be set and will apply this delay as part of the routing process[^1]. 15 | 16 | [^1]: This is done via `tc qdisc` which only works for egress traffic. To ensure the delay applies in both directions, we divide it by 2 and apply it on both interfaces. 17 | -------------------------------------------------------------------------------- /hole-punch-interop/router/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | if [ -z "$DELAY_MS" ]; then 6 | echo "Error: DELAY_MS is not set!" 
7 | exit 1 8 | fi 9 | 10 | ADDR_EXTERNAL=$(ip -json addr show eth0 | jq '.[0].addr_info[0].local' -r) 11 | SUBNET_INTERNAL=$(ip -json addr show eth1 | jq '.[0].addr_info[0].local + "/" + (.[0].addr_info[0].prefixlen | tostring)' -r) 12 | 13 | # Set up NAT 14 | nft add table ip nat 15 | nft add chain ip nat postrouting { type nat hook postrouting priority 100 \; } 16 | nft add rule ip nat postrouting ip saddr $SUBNET_INTERNAL oifname "eth0" snat $ADDR_EXTERNAL 17 | 18 | # tc can only apply delays on egress traffic. By setting half of the delay on both eth0 and eth1, we achieve the full delay passed in as a parameter. 19 | half_of_delay=$(expr "$DELAY_MS" / 2 ) 20 | param="${half_of_delay}ms" 21 | 22 | tc qdisc add dev eth0 root netem delay $param 23 | tc qdisc add dev eth1 root netem delay $param 24 | 25 | echo "1" > /tmp/setup_done # This will be checked by our docker HEALTHCHECK 26 | 27 | tail -f /dev/null # Keep it running forever. -------------------------------------------------------------------------------- /hole-punch-interop/rust-relay/.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | -------------------------------------------------------------------------------- /hole-punch-interop/rust-relay/.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | -------------------------------------------------------------------------------- /hole-punch-interop/rust-relay/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "relay" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | anyhow = "1.0.72" 11 | env_logger = "0.10.0" 12 | libp2p = { version = "0.52.1", features = ["tokio", "relay", "ed25519", "quic", "tcp", "yamux", "noise", "macros", "identify", "ping"] } 13 | log = "0.4.19" 14 | redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } 15 | tokio = { version = "1.29.1", features = ["rt-multi-thread", "macros"] } 16 | -------------------------------------------------------------------------------- /hole-punch-interop/rust-relay/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.5-labs 2 | FROM rust:1.72.0 as builder 3 | 4 | # Run with access to the target cache to speed up builds 5 | WORKDIR /workspace 6 | ADD . . 7 | 8 | # Build the relay as a statically-linked binary. Unfortunately, we must specify the `--target` explicitly. See https://msfjarvis.dev/posts/building-static-rust-binaries-for-linux/.
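# Statically linking against crt-static also means the binary has no runtime dependency on glibc, which is what lets it run inside the minimal alpine image used in the final stage below.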
9 | RUN --mount=type=cache,target=./target \ 10 | --mount=type=cache,target=/usr/local/cargo/registry \ 11 | RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package relay --target $(rustc -vV | grep host | awk '{print $2}') 12 | 13 | RUN --mount=type=cache,target=./target \ 14 | mv ./target/$(rustc -vV | grep host | awk '{print $2}')/release/relay /usr/local/bin/relay 15 | 16 | FROM alpine:3 17 | COPY --from=builder /usr/local/bin/relay /usr/bin/relay 18 | RUN --mount=type=cache,target=/var/cache/apk apk add iproute2-tc 19 | ENV RUST_BACKTRACE=1 20 | CMD ["/usr/bin/relay"] 21 | -------------------------------------------------------------------------------- /hole-punch-interop/rust-relay/Makefile: -------------------------------------------------------------------------------- 1 | image_name := hole-punch-test-rust-relay 2 | 3 | all: image.json 4 | 5 | image.json: Cargo.lock src/** Dockerfile 6 | IMAGE_NAME=${image_name} ../dockerBuildWrapper.sh -f Dockerfile . 7 | docker image inspect ${image_name} -f "{{.Id}}" | \ 8 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 9 | clean: 10 | rm image.json 11 | -------------------------------------------------------------------------------- /hole-punch-interop/rust-relay/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Context, Result}; 2 | use libp2p::{ 3 | core::{ 4 | multiaddr::{Multiaddr, Protocol}, 5 | muxing::StreamMuxerBox, 6 | transport::Transport, 7 | upgrade, 8 | }, 9 | futures::future::Either, 10 | futures::StreamExt, 11 | identify, identity, noise, ping, quic, relay, 12 | swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, 13 | tcp, yamux, PeerId, Swarm, 14 | }; 15 | use redis::AsyncCommands; 16 | use std::net::{IpAddr, Ipv4Addr}; 17 | 18 | /// The redis key we push the relay's TCP listen address to. 19 | const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS"; 20 | /// The redis key we push the relay's QUIC listen address to. 21 | const RELAY_QUIC_ADDRESS: &str = "RELAY_QUIC_ADDRESS"; 22 | 23 | #[tokio::main] 24 | async fn main() -> Result<()> { 25 | env_logger::builder() 26 | .parse_filters( 27 | "debug,netlink_proto=warn,rustls=warn,multistream_select=warn,libp2p_swarm::connection=info,quinn=debug,libp2p_quic=trace", 28 | ) 29 | .parse_default_env() 30 | .init(); 31 | 32 | let mut swarm = make_swarm()?; 33 | 34 | let tcp_listener_id = swarm.listen_on(tcp_addr(Ipv4Addr::UNSPECIFIED.into()))?; 35 | let quic_listener_id = swarm.listen_on(quic_addr(Ipv4Addr::UNSPECIFIED.into()))?; 36 | 37 | loop { 38 | match swarm.next().await.expect("Infinite Stream.") { 39 | SwarmEvent::NewListenAddr { 40 | address, 41 | listener_id, 42 | } => { 43 | let Some(Protocol::Ip4(addr)) = address.iter().next() else { 44 | bail!("Expected first protocol of listen address to be Ip4") 45 | }; 46 | 47 | if addr.is_loopback() { 48 | log::debug!("Ignoring loop-back address: {address}"); 49 | 50 | continue; 51 | } 52 | 53 | swarm.add_external_address(address.clone()); // We know that in our testing network setup we are listening on a "publicly-reachable" address. 54 | 55 | log::info!("Listening on {address}"); 56 | 57 | let address = address 58 | .with(Protocol::P2p(*swarm.local_peer_id())) 59 | .to_string(); 60 | 61 | // Push each address twice because we need to connect two clients.
62 | 63 | let mut redis = RedisClient::new("redis", 6379).await?; 64 | 65 | if listener_id == tcp_listener_id { 66 | redis.push(RELAY_TCP_ADDRESS, &address).await?; 67 | redis.push(RELAY_TCP_ADDRESS, &address).await?; 68 | } 69 | if listener_id == quic_listener_id { 70 | redis.push(RELAY_QUIC_ADDRESS, &address).await?; 71 | redis.push(RELAY_QUIC_ADDRESS, &address).await?; 72 | } 73 | } 74 | other => { 75 | log::trace!("{other:?}") 76 | } 77 | } 78 | } 79 | } 80 | 81 | fn tcp_addr(addr: IpAddr) -> Multiaddr { 82 | Multiaddr::empty().with(addr.into()).with(Protocol::Tcp(0)) 83 | } 84 | 85 | fn quic_addr(addr: IpAddr) -> Multiaddr { 86 | Multiaddr::empty() 87 | .with(addr.into()) 88 | .with(Protocol::Udp(0)) 89 | .with(Protocol::QuicV1) 90 | } 91 | 92 | fn make_swarm() -> Result<Swarm<Behaviour>> { 93 | let local_key = identity::Keypair::generate_ed25519(); 94 | let local_peer_id = PeerId::from(local_key.public()); 95 | log::info!("Local peer id: {local_peer_id}"); 96 | 97 | let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) 98 | .upgrade(upgrade::Version::V1Lazy) 99 | .authenticate(noise::Config::new(&local_key)?) 100 | .multiplex(yamux::Config::default()) 101 | .or_transport(quic::tokio::Transport::new(quic::Config::new(&local_key))) 102 | .map(|either_output, _| match either_output { 103 | Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), 104 | Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), 105 | }) 106 | .boxed(); 107 | let behaviour = Behaviour { 108 | relay: relay::Behaviour::new(local_peer_id, relay::Config::default()), 109 | identify: identify::Behaviour::new(identify::Config::new( 110 | "/hole-punch-tests/1".to_owned(), 111 | local_key.public(), 112 | )), 113 | ping: ping::Behaviour::default(), 114 | }; 115 | 116 | Ok( 117 | SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id) 118 | .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) 119 | .build(), 120 | ) 121 | } 122 | 123 | struct RedisClient { 124 | inner: redis::aio::Connection, 125 | } 126 | 127 | impl RedisClient { 128 | async fn new(host: &str, port: u16) -> Result<Self> { 129 | let client = redis::Client::open(format!("redis://{host}:{port}/")) 130 | .context("Bad redis server URL")?; 131 | let connection = client 132 | .get_async_connection() 133 | .await 134 | .context("Failed to connect to redis server")?; 135 | 136 | Ok(Self { inner: connection }) 137 | } 138 | 139 | async fn push(&mut self, key: &str, value: impl ToString) -> Result<()> { 140 | self.inner.rpush(key, value.to_string()).await?; 141 | 142 | Ok(()) 143 | } 144 | } 145 | 146 | #[derive(NetworkBehaviour)] 147 | struct Behaviour { 148 | relay: relay::Behaviour, 149 | identify: identify::Behaviour, 150 | ping: ping::Behaviour, 151 | } 152 | -------------------------------------------------------------------------------- /hole-punch-interop/src/compose-runner.ts: -------------------------------------------------------------------------------- 1 | import {promises as fs} from 'fs'; 2 | import path from 'path'; 3 | import {exec as execStd} from 'child_process'; 4 | import util from 'util'; 5 | import {ComposeSpecification} from "../compose-spec/compose-spec"; 6 | import {stringify} from 'yaml'; 7 | import {sanitizeComposeName} from "./lib"; 8 | 9 | const exec = util.promisify(execStd); 10 | 11 | export async function run(compose: ComposeSpecification, rootAssetDir: string, dryRun: boolean): Promise<Report | null> { 12 | const sanitizedComposeName = sanitizeComposeName(compose.name) 13 | const
assetDir = path.join(rootAssetDir, sanitizedComposeName); 14 | 15 | await fs.mkdir(assetDir, { recursive: true }) 16 | 17 | 18 | // Create compose.yaml file 19 | // Some docker compose environments don't like the name field to have special characters 20 | const composeYmlPath = path.join(assetDir, "docker-compose.yaml"); 21 | await fs.writeFile(composeYmlPath, stringify({ ...compose, name: sanitizedComposeName })) 22 | 23 | if (dryRun) { 24 | return null; 25 | } 26 | 27 | const stdoutLogFile = path.join(assetDir, `stdout.log`); 28 | const stderrLogFile = path.join(assetDir, `stderr.log`); 29 | 30 | try { 31 | const { stdout, stderr } = await exec(`docker compose -f ${composeYmlPath} up --exit-code-from dialer --abort-on-container-exit`, { timeout: 60 * 1000 }) 32 | 33 | await fs.writeFile(stdoutLogFile, stdout); 34 | await fs.writeFile(stderrLogFile, stderr); 35 | 36 | return JSON.parse(lastStdoutLine(stdout, "dialer", sanitizedComposeName)) as Report 37 | } catch (e: unknown) { 38 | if (isExecException(e)) { 39 | await fs.writeFile(stdoutLogFile, e.stdout) 40 | await fs.writeFile(stderrLogFile, e.stderr) 41 | } 42 | 43 | throw e 44 | } finally { 45 | try { 46 | await exec(`docker compose -f ${composeYmlPath} down`); 47 | } catch (e) { 48 | console.log("Failed to compose down", e) 49 | } 50 | } 51 | } 52 | 53 | export interface ExecException extends Error { 54 | cmd?: string | undefined; 55 | killed?: boolean | undefined; 56 | code?: number | undefined; 57 | signal?: NodeJS.Signals | undefined; 58 | stdout: string; 59 | stderr: string; 60 | } 61 | 62 | function isExecException(candidate: unknown): candidate is ExecException { 63 | return typeof candidate === 'object' && candidate !== null && 'cmd' in candidate; 64 | } 65 | 66 | interface Report { 67 | rtt_to_holepunched_peer_millis: number 68 | } 69 | 70 | export function lastStdoutLine(stdout: string, component: string, composeName: string): string { 71 | const allComponentStdout = stdout.split("\n").filter(line => line.startsWith(`${composeName}-${component}-1`)); 72 | 73 | const exitMessage = allComponentStdout.pop(); 74 | const lastLine = allComponentStdout.pop(); 75 | 76 | const [front, componentStdout] = lastLine.split("|"); 77 | 78 | return componentStdout.trim() 79 | } 80 | -------------------------------------------------------------------------------- /hole-punch-interop/src/lib.ts: -------------------------------------------------------------------------------- 1 | import * as csv from "csv-parse/sync"; 2 | import fs from "fs"; 3 | 4 | export type ResultLine = { 5 | name: string; 6 | outcome: string; 7 | error: string; 8 | }; 9 | 10 | export type ParsedResultLine = { 11 | name: string; 12 | outcome: string; 13 | error: string; 14 | implA: string; 15 | implB: string; 16 | }; 17 | 18 | export type ResultFile = ResultLine[]; 19 | 20 | export type CellRender = (a: string, b: string, line: ResultLine) => string; 21 | 22 | /** 23 | * Called for every cell in the table. 24 | * 25 | * This is designed to let future implementers add more complex output interpretation, with nested tables, etc.
26 | */ 27 | export const defaultCellRender: CellRender = (a, b, line) => { 28 | let result = ":red_circle:"; 29 | 30 | if (line.outcome === "success") { 31 | result = ":green_circle:"; 32 | } 33 | 34 | if (process.env.RUN_URL) { 35 | result = `[${result}](${process.env.RUN_URL})`; 36 | } 37 | 38 | return result; 39 | }; 40 | 41 | export const load = (path: string): ResultFile => { 42 | return csv.parse(fs.readFileSync(path, "utf8"), { 43 | columns: true, 44 | skip_empty_lines: true, 45 | delimiter: ",", 46 | }) as ResultFile; 47 | }; 48 | 49 | export const save = (path: string, content: string) => { 50 | fs.writeFileSync(path, content); 51 | }; 52 | 53 | type PairOfImplementation = [string, string]; 54 | 55 | export const listUniqPairs = (pairs: PairOfImplementation[]): string[] => { 56 | const uniq = new Set<string>(); 57 | 58 | for (const [a, b] of pairs) { 59 | uniq.add(a); 60 | uniq.add(b); 61 | } 62 | 63 | return Array.from(uniq).sort(); 64 | }; 65 | 66 | export const generateEmptyMatrix = ( 67 | keys: string[], 68 | defaultValue: string 69 | ): string[][] => { 70 | const header = [" ", ...keys]; 71 | 72 | const matrix = [header]; 73 | const rowOfDefaultValues = Array(keys.length).fill(defaultValue); 74 | 75 | for (const key of keys) { 76 | const row = [key, ...rowOfDefaultValues]; 77 | matrix.push(row); 78 | } 79 | 80 | return matrix; 81 | }; 82 | 83 | export const generateTable = ( 84 | results: Array<ParsedResultLine>, 85 | defaultValue: string = ":white_circle:", 86 | testedCell: CellRender = defaultCellRender 87 | ): string[][] => { 88 | const pairs = results.map(({ implA, implB }) => [implA, implB] as PairOfImplementation); 89 | const uniqPairs = listUniqPairs(pairs); 90 | 91 | const matrix = generateEmptyMatrix(uniqPairs, defaultValue); 92 | matrix[0][0] = "⬇️ dialer 📞 \\ ➡️ listener 🎧" 93 | 94 | for (const result of results) { 95 | const { implA, implB } = result 96 | const i = uniqPairs.indexOf(implA); 97 | const j = uniqPairs.indexOf(implB); 98 | 99 | const cell = testedCell(implA, implB, result); 100 | 101 | matrix[i + 1][j + 1] = cell; 102 | } 103 | 104 | return matrix; 105 | }; 106 | 107 | export const markdownTable = (table: string[][]): string => { 108 | const wrapped = (x: string) => `| ${x} |`; 109 | 110 | const header = table[0].join(" | "); 111 | const separator = table[0].map((x) => "-".repeat(x.length)).join(" | "); 112 | 113 | const rows = table.slice(1).map((row) => row.join(" | ")); 114 | 115 | const body = [wrapped(header), wrapped(separator), ...rows.map(wrapped)].join( 116 | "\n" 117 | ); 118 | 119 | return body; 120 | }; 121 | 122 | export function sanitizeComposeName(name: string) { 123 | return name.replace(/[^a-zA-Z0-9_-]/g, "_"); 124 | } 125 | -------------------------------------------------------------------------------- /hole-punch-interop/src/stdoutParser.test.ts: -------------------------------------------------------------------------------- 1 | import {lastStdoutLine} from "./compose-runner"; 2 | 3 | let exampleStdout = ` 4 | Attaching to rust-v0_52_x_rust-v0_52__quic_-dialer-1, rust-v0_52_x_rust-v0_52__quic_-dialer_router-1, rust-v0_52_x_rust-v0_52__quic_-listener-1, rust-v0_52_x_rust-v0_52__quic_-listener_router-1, rust-v0_52_x_rust-v0_52__quic_-redis-1, rust-v0_52_x_rust-v0_52__quic_-relay-1 5 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:C 19 Sep 2023 05:19:20.620 # WARNING Memory overcommit must be enabled! Without it, a background save or replication may fail under low memory condition.
Being disabled, it can also cause failures without low memory condition, see https://github.com/jemalloc/jemalloc/issues/1328. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect. 6 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:C 19 Sep 2023 05:19:20.620 * oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo 7 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:C 19 Sep 2023 05:19:20.620 * Redis version=7.2.1, bits=64, commit=00000000, modified=0, pid=1, just started 8 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:C 19 Sep 2023 05:19:20.620 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf 9 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:M 19 Sep 2023 05:19:20.620 * monotonic clock: POSIX clock_gettime 10 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:M 19 Sep 2023 05:19:20.621 * Running mode=standalone, port=6379. 11 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:M 19 Sep 2023 05:19:20.621 * Server initialized 12 | rust-v0_52_x_rust-v0_52__quic_-redis-1 | 1:M 19 Sep 2023 05:19:20.621 * Ready to accept connections tcp 13 | rust-v0_52_x_rust-v0_52__quic_-dialer-1 | {"rtt_to_holepunched_peer_millis":201} 14 | rust-v0_52_x_rust-v0_52__quic_-dialer-1 exited with code 0 15 | `; 16 | 17 | const line = lastStdoutLine(exampleStdout, "dialer", "rust-v0_52_x_rust-v0_52__quic_"); 18 | 19 | if (line != `{"rtt_to_holepunched_peer_millis":201}`) { 20 | throw new Error("Unexpected stdout") 21 | } 22 | -------------------------------------------------------------------------------- /hole-punch-interop/testplans.ts: -------------------------------------------------------------------------------- 1 | import { buildTestSpecs } from "./src/generator" 2 | import { Version, versions } from "./versions" 3 | import { promises as fs } from "fs"; 4 | import {ExecException, run} from "./src/compose-runner" 5 | import { stringify } from "csv-stringify/sync" 6 | import { stringify as YAMLStringify } from "yaml" 7 | import yargs from "yargs/yargs" 8 | import path from "path"; 9 | 10 | (async () => { 11 | const WorkerCount = parseInt(process.env.WORKER_COUNT || "1") 12 | const argv = await yargs(process.argv.slice(2)) 13 | .options({ 14 | 'name-filter': { 15 | description: 'Only run tests including this name', 16 | default: "", 17 | }, 18 | 'name-ignore': { 19 | description: 'Do not run any tests including this name', 20 | default: "", 21 | }, 22 | 'dry-run': { 23 | description: "Don't actually run the test, just generate the compose files", 24 | default: false, 25 | type: 'boolean' 26 | }, 27 | 'extra-versions-dir': { 28 | description: 'Look for extra versions in this directory. 
Version files must be in json format', 29 | default: "", 30 | type: 'string' 31 | }, 32 | 'extra-version': { 33 | description: 'Paths to JSON files for additional versions to include in the test matrix', 34 | default: [], 35 | type: 'array' 36 | }, 37 | }) 38 | .help() 39 | .version(false) 40 | .alias('help', 'h').argv; 41 | const extraVersionsDir = argv.extraVersionsDir 42 | const extraVersions: Array<Version> = [] 43 | if (extraVersionsDir !== "") { 44 | try { 45 | const files = await fs.readdir(extraVersionsDir); 46 | for (const file of files) { 47 | const contents = await fs.readFile(path.join(extraVersionsDir, file)) 48 | extraVersions.push(...JSON.parse(contents.toString())) 49 | } 50 | } catch (err) { 51 | console.error("Error reading extra versions") 52 | console.error(err); 53 | } 54 | } 55 | 56 | for (let versionPath of argv.extraVersion.filter(p => p !== "")) { 57 | const contents = await fs.readFile(versionPath); 58 | extraVersions.push(JSON.parse(contents.toString())) 59 | } 60 | 61 | let nameFilter: string | null = argv["name-filter"] 62 | if (nameFilter === "") { 63 | nameFilter = null 64 | } 65 | let nameIgnore: string | null = argv["name-ignore"] 66 | if (nameIgnore === "") { 67 | nameIgnore = null 68 | } 69 | 70 | let routerImageId = JSON.parse(await fs.readFile(path.join(".", "router", "image.json"), "utf-8")).imageID; 71 | let relayImageId = JSON.parse(await fs.readFile(path.join(".", "rust-relay", "image.json"), "utf-8")).imageID; 72 | 73 | const routerDelay = 100; 74 | const relayDelay = 25; 75 | 76 | const rttRelayedConnection = routerDelay * 2 + relayDelay * 2; 77 | const rttDirectConnection = routerDelay * 2; 78 | 79 | const assetDir = path.join(__dirname, "runs"); 80 | 81 | let testSpecs = await buildTestSpecs(versions.concat(extraVersions), nameFilter, nameIgnore, routerImageId, relayImageId, routerDelay, relayDelay, assetDir) 82 | 83 | console.log(`Running ${testSpecs.length} tests`) 84 | const failures: Array<{ name: String, e: ExecException }> = [] 85 | const statuses: Array<string[]> = [["name", "outcome"]] 86 | const workers = new Array(WorkerCount).fill({}).map(async () => { 87 | while (true) { 88 | const testSpec = testSpecs.pop() 89 | if (testSpec == null) { 90 | return 91 | } 92 | const name = testSpec.name; 93 | if (!name) { 94 | console.warn("Skipping testSpec without name") 95 | continue; 96 | } 97 | 98 | console.log("Running test spec: " + name) 99 | 100 | try { 101 | const report = await run(testSpec, assetDir, argv['dry-run'] as boolean); 102 | 103 | if (report != null) { 104 | const rttDifference = Math.abs(report.rtt_to_holepunched_peer_millis - rttDirectConnection); 105 | 106 | if (rttDifference > 5) { 107 | // Emit a warning but don't do anything for now.
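// A successful hole punch should yield an RTT close to the direct-connection baseline (2x the router delay); a connection that is still relayed would additionally pay roughly 2x the relay delay on top.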
108 | console.warn(`Expected RTT of direct connection to be ~${rttDirectConnection}ms but was ${report.rtt_to_holepunched_peer_millis}ms`) 109 | } 110 | } 111 | 112 | statuses.push([name, "success"]) 113 | } catch (e) { 114 | failures.push({ name, e }) 115 | statuses.push([name, "failure"]) 116 | } 117 | } 118 | }) 119 | await Promise.all(workers) 120 | 121 | console.log(`${failures.length} failures:`) 122 | 123 | for (const [number, {name, e}] of failures.entries()) { 124 | console.log(`---------- ${name} ---------- (${number + 1} / ${failures.length})\n`); 125 | console.log(e.stderr) 126 | } 127 | 128 | await fs.writeFile("results.csv", stringify(statuses)) 129 | 130 | console.log("Run complete") 131 | })() 132 | -------------------------------------------------------------------------------- /hole-punch-interop/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "resolveJsonModule": true, 4 | "esModuleInterop": true, 5 | "target": "ES2015", 6 | "moduleResolution": "node" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /hole-punch-interop/versions.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs" 2 | 3 | export type Version = { 4 | id: string, 5 | // This can be the image ID, or a function that takes the version ID and returns the image ID. 6 | // By default it uses canonicalImagePath below. 7 | containerImageID?: string, 8 | transports: Array<"tcp" | "quic">, 9 | } 10 | 11 | export const versions: Array<Version> = [ 12 | { 13 | id: "rust-v0.53", 14 | transports: ["tcp", "quic"] 15 | } as Version, 16 | ].map((v: Version) => (typeof v.containerImageID === "undefined" ? ({ ...v, containerImageID: readImageId(canonicalImagePath(v.id)) }) : v)) 17 | 18 | function readImageId(path: string): string { 19 | return JSON.parse(fs.readFileSync(path, "utf8")).imageID; 20 | } 21 | 22 | // Finds the `image.json` for the given version id. 23 | // 24 | // Expects the form "<impl>-vX.Y.Z" or "<impl>-vX.Y". 25 | // The image id must be in the file "./impl/<impl>/vX.Y/image.json" or "./impl/<impl>/v0.0.Z/image.json".
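// For example: "rust-v0.53" (the only version listed above) resolves to "./impl/rust/v0.53/image.json", while a hypothetical "rust-v0.0.1" would resolve to "./impl/rust/v0.0.1/image.json".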
26 | function canonicalImagePath(id: string): string { 27 | // Split by implementation and version 28 | const [impl, version] = id.split("-v") 29 | // Drop the patch version 30 | const [major, minor, patch] = version.split(".") 31 | let versionFolder = `v${major}.${minor}` 32 | if (major === "0" && minor === "0") { 33 | // We're still in the 0.0.x phase, so we use the patch version 34 | versionFolder = `v0.0.${patch}` 35 | } 36 | // Read the image ID from the JSON file on the filesystem 37 | return `./impl/${impl}/${versionFolder}/image.json` 38 | } 39 | -------------------------------------------------------------------------------- /perf/Makefile: -------------------------------------------------------------------------------- 1 | ssh-keygen: 2 | ssh-keygen -t ed25519 -f ./terraform/modules/short_lived/files/perf -N '' 3 | 4 | ssh-add: 5 | ssh-add ./terraform/modules/short_lived/files/perf 6 | -------------------------------------------------------------------------------- /perf/README.md: -------------------------------------------------------------------------------- 1 | # libp2p performance benchmarking 2 | 3 | This project includes the following components: 4 | 5 | - `terraform/`: Terraform scripts to provision infrastructure 6 | - `impl/`: implementations of the [libp2p perf protocol](https://github.com/libp2p/specs/blob/master/perf/perf.md) running on top of e.g. go-libp2p, rust-libp2p or Go's standard-library HTTPS stack 7 | - `runner/`: a set of scripts building and running the above implementations on the above infrastructure, reporting the results in `benchmark-results.json` 8 | 9 | Benchmark results can be visualized with https://observablehq.com/@libp2p-workspace/performance-dashboard. 10 | 11 | ## Running via GitHub Action 12 | 13 | 1. Create a pull request with your changes on https://github.com/libp2p/test-plans/. 14 | 2. Trigger the GitHub Action for your branch on https://github.com/libp2p/test-plans/actions/workflows/perf.yml (see the _Run workflow_ button). 15 | 3. Wait for the action run to finish and push a commit to your branch. 16 | 4. Visualize the results on https://observablehq.com/@libp2p-workspace/performance-dashboard. 17 | 18 | ## Running manually 19 | 20 | ### Prerequisites 21 | 22 | - Terraform 1.5.4 or later 23 | - Node.js 18 or later 24 | - [an AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) 25 | 26 | 27 | ### Provision infrastructure 28 | 29 | 1. Save your public SSH key as the file `./terraform/modules/short_lived/files/perf.pub`; or generate a new key pair with `make ssh-keygen` and add it to your SSH agent with `make ssh-add`. 30 | 2. `cd terraform/configs/local` 31 | 3. `terraform init` 32 | 4. `terraform apply` 33 | 5. `CLIENT_IP=$(terraform output -raw client_ip)` 34 | 6. `SERVER_IP=$(terraform output -raw server_ip)` 35 | 36 | **Notes** 37 | - While running Terraform you may encounter the following error: 38 | ```bash 39 | Error: collecting instance settings: reading EC2 Launch Template versions: couldn't find resource 40 | │ 41 | │ with module.short_lived_server[0].aws_instance.perf, 42 | │ on ../../modules/short_lived/main.tf line 15, in resource "aws_instance" "perf": 43 | │ 15: resource "aws_instance" "perf" { 44 | ``` 45 | - This implies that you haven't deployed the long-lived infrastructure on your AWS account. To do so along with each short-lived deployment, you can set the [`long_lived_enabled`](./terraform/configs/local/terraform.tf#L42) variable to default to `true` (e.g. via the `TF_VAR_long_lived_enabled` environment variable).
Terraform should then spin up the long-lived resources that are required for the short-lived resources to be created. 46 | 47 | - It's best to destroy the infrastructure after you're done with your testing; you can do that by running `terraform destroy`. 48 | 49 | ### Build and run libp2p implementations 50 | 51 | Given you have provisioned your infrastructure, you can now build and run the libp2p implementations on the AWS instances. 52 | 53 | 1. `cd runner` 54 | 2. `npm ci` 55 | 3. `npm run start -- --client-public-ip $CLIENT_IP --server-public-ip $SERVER_IP` 56 | * Note: The default number of iterations that perf will run is 10; the desired number of iterations can be set with the `--iterations <n>` option. 57 | 58 | ### Deprovision infrastructure 59 | 60 | 1. `cd terraform/configs/local` 61 | 2. `terraform destroy` 62 | 63 | ## Adding a new implementation or a new version 64 | 65 | 1. Add the implementation to a new subdirectory in [`impl/*`](./impl/). 66 | - For a new implementation, create a folder `impl/<implementation-name>/`, e.g. `go-libp2p`. 67 | - For a new version of an existing implementation, create a folder `impl/<implementation-name>/<version>/`. 68 | - In that folder include a `Makefile` that builds an executable and stores it next to the `Makefile` under the name `perf`. 69 | - Requirements for the executable: 70 | - Running as a libp2p-perf server: 71 | - The perf server must not exit as it will be closed by the test runner. 72 | - The executable must accept the command flag `--run-server` which indicates it's running as a server. 73 | - Running as a libp2p-perf client 74 | - Given that perf is a client-driven set of benchmarks, the performance will be measured by the client. 75 | - Input via command line 76 | - `--server-address` 77 | - `--transport` (see [`runner/src/versions.ts`](./runner/src/versions.ts#L7-L43) for possible variants) 78 | - `--upload-bytes` number of bytes to upload per stream. 79 | - `--download-bytes` number of bytes to download per stream. 80 | - Output 81 | - Logging MUST go to `stderr`. 82 | - Measurement output is printed to `stdout` as JSON. 83 | - The output schema is: 84 | ``` typescript 85 | interface Data { 86 | type: "intermediary" | "final"; 87 | timeSeconds: number; 88 | uploadBytes: number; 89 | downloadBytes: number; 90 | } 91 | ``` 92 | - Every second the client must print the current progress to stdout. See the example below. Note the `type: "intermediary"`. 93 | ``` json 94 | { 95 | "type": "intermediary", 96 | "timeSeconds": 1.004957645, 97 | "uploadBytes": 73039872, 98 | "downloadBytes": 0 99 | }, 100 | ``` 101 | - Before terminating, the client must print a final summary. See the example below. Note the `type: "final"`. Also note that the measurement includes the time to (1) establish the connection, (2) upload the bytes and (3) download the bytes. 102 | ``` json 103 | { 104 | "type": "final", 105 | "timeSeconds": 60.127230659, 106 | "uploadBytes": 4382392320, 107 | "downloadBytes": 0 108 | } 109 | ``` 110 | 2. For a new implementation, include it in the `all` target in [`impl/Makefile`](./impl/Makefile#L7). 111 | 3. For a new version, reference the version in [`runner/src/versions.ts`](./runner/src/versions.ts#L7-L43). 112 | -------------------------------------------------------------------------------- /perf/impl/Makefile: -------------------------------------------------------------------------------- 1 | GO_SUBDIRS := $(wildcard go-libp2p/*/.) 2 | RUST_SUBDIRS := $(wildcard rust-libp2p/*/.) 3 | HTTPS_SUBDIRS := $(wildcard https/*/.) 4 | QUIC_GO_SUBDIRS := $(wildcard quic-go/*/.)
5 | JS_SUBDIRS := $(wildcard js-libp2p/*/.) 6 | 7 | all: $(RUST_SUBDIRS) $(GO_SUBDIRS) $(HTTPS_SUBDIRS) $(QUIC_GO_SUBDIRS) $(JS_SUBDIRS) 8 | 9 | $(RUST_SUBDIRS): 10 | $(MAKE) -C $@ 11 | 12 | $(GO_SUBDIRS): 13 | $(MAKE) -C $@ 14 | 15 | $(HTTPS_SUBDIRS): 16 | $(MAKE) -C $@ 17 | 18 | $(QUIC_GO_SUBDIRS): 19 | $(MAKE) -C $@ 20 | 21 | $(JS_SUBDIRS): 22 | $(MAKE) -C $@ 23 | 24 | go-libp2p: $(GO_SUBDIRS) 25 | 26 | rust-libp2p: $(RUST_SUBDIRS) 27 | 28 | https: $(HTTPS_SUBDIRS) 29 | 30 | quic-go: $(QUIC_GO_SUBDIRS) 31 | 32 | js-libp2p: $(JS_SUBDIRS) 33 | 34 | clean: $(RUST_SUBDIRS:%=%clean) $(GO_SUBDIRS:%=%clean) $(HTTPS_SUBDIRS:%=%clean) $(QUIC_GO_SUBDIRS:%=%clean) $(JS_SUBDIRS:%=%clean) 35 | 36 | %clean: 37 | $(MAKE) -C $* clean 38 | 39 | .PHONY: $(RUST_SUBDIRS) $(GO_SUBDIRS) $(HTTPS_SUBDIRS) $(QUIC_GO_SUBDIRS) $(JS_SUBDIRS) all clean 40 | -------------------------------------------------------------------------------- /perf/impl/go-libp2p/.gitignore: -------------------------------------------------------------------------------- 1 | go-libp2p-*.zip 2 | go-libp2p-* 3 | go-libp2p-*/* 4 | image.json 5 | -------------------------------------------------------------------------------- /perf/impl/go-libp2p/v0.41/.gitignore: -------------------------------------------------------------------------------- 1 | perf 2 | .cache -------------------------------------------------------------------------------- /perf/impl/go-libp2p/v0.41/Makefile: -------------------------------------------------------------------------------- 1 | GO_FILES := $(wildcard *.go) 2 | 3 | all: perf 4 | 5 | perf: $(GO_FILES) 6 | docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)":/usr/src/myapp -w /usr/src/myapp -e GOCACHE=/usr/src/myapp/.cache golang:$(shell awk '/^go [0-9]+\.[0-9]+$$/ {print $$2}' go.mod) go build -o perf . 
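# The awk expression above extracts the `go` directive from go.mod, so the golang build image always matches the toolchain version the module declares.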
7 | 8 | clean: 9 | rm -r .cache 10 | rm perf 11 | 12 | .PHONY: all clean 13 | -------------------------------------------------------------------------------- /perf/impl/go-libp2p/v0.41/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/libp2p/test-plans/perf/impl/go-libp2p/v0.41 2 | 3 | go 1.24 4 | 5 | toolchain go1.24.0 6 | 7 | require ( 8 | github.com/ipfs/go-log/v2 v2.5.1 9 | github.com/libp2p/go-buffer-pool v0.1.0 10 | github.com/libp2p/go-libp2p v0.41.0 11 | github.com/multiformats/go-multiaddr v0.15.0 12 | ) 13 | 14 | require ( 15 | github.com/benbjohnson/clock v1.3.5 // indirect 16 | github.com/beorn7/perks v1.0.1 // indirect 17 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 18 | github.com/containerd/cgroups v1.1.0 // indirect 19 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 20 | github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect 21 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 22 | github.com/docker/go-units v0.5.0 // indirect 23 | github.com/elastic/gosigar v0.14.3 // indirect 24 | github.com/flynn/noise v1.1.0 // indirect 25 | github.com/francoispqt/gojay v1.2.13 // indirect 26 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 27 | github.com/godbus/dbus/v5 v5.1.0 // indirect 28 | github.com/gogo/protobuf v1.3.2 // indirect 29 | github.com/google/gopacket v1.1.19 // indirect 30 | github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect 31 | github.com/google/uuid v1.6.0 // indirect 32 | github.com/gorilla/websocket v1.5.3 // indirect 33 | github.com/huin/goupnp v1.3.0 // indirect 34 | github.com/ipfs/go-cid v0.5.0 // indirect 35 | github.com/jackpal/go-nat-pmp v1.0.2 // indirect 36 | github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect 37 | github.com/klauspost/compress v1.18.0 // indirect 38 | github.com/klauspost/cpuid/v2 v2.2.10 // indirect 39 | github.com/koron/go-ssdp v0.0.5 // indirect 40 | github.com/libp2p/go-flow-metrics v0.2.0 // indirect 41 | github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect 42 | github.com/libp2p/go-msgio v0.3.0 // indirect 43 | github.com/libp2p/go-nat v0.2.0 // indirect 44 | github.com/libp2p/go-netroute v0.2.2 // indirect 45 | github.com/libp2p/go-reuseport v0.4.0 // indirect 46 | github.com/libp2p/go-yamux/v5 v5.0.0 // indirect 47 | github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect 48 | github.com/mattn/go-isatty v0.0.20 // indirect 49 | github.com/miekg/dns v1.1.63 // indirect 50 | github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect 51 | github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect 52 | github.com/minio/sha256-simd v1.0.1 // indirect 53 | github.com/mr-tron/base58 v1.2.0 // indirect 54 | github.com/multiformats/go-base32 v0.1.0 // indirect 55 | github.com/multiformats/go-base36 v0.2.0 // indirect 56 | github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect 57 | github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect 58 | github.com/multiformats/go-multibase v0.2.0 // indirect 59 | github.com/multiformats/go-multicodec v0.9.0 // indirect 60 | github.com/multiformats/go-multihash v0.2.3 // indirect 61 | github.com/multiformats/go-multistream v0.6.0 // indirect 62 | github.com/multiformats/go-varint v0.0.7 // indirect 63 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 64 | github.com/onsi/ginkgo/v2 v2.22.2 // indirect 65 | github.com/opencontainers/runtime-spec v1.2.0 // indirect 66 | 
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect 67 | github.com/pion/datachannel v1.5.10 // indirect 68 | github.com/pion/dtls/v2 v2.2.12 // indirect 69 | github.com/pion/dtls/v3 v3.0.4 // indirect 70 | github.com/pion/ice/v4 v4.0.6 // indirect 71 | github.com/pion/interceptor v0.1.37 // indirect 72 | github.com/pion/logging v0.2.3 // indirect 73 | github.com/pion/mdns/v2 v2.0.7 // indirect 74 | github.com/pion/randutil v0.1.0 // indirect 75 | github.com/pion/rtcp v1.2.15 // indirect 76 | github.com/pion/rtp v1.8.11 // indirect 77 | github.com/pion/sctp v1.8.36 // indirect 78 | github.com/pion/sdp/v3 v3.0.10 // indirect 79 | github.com/pion/srtp/v3 v3.0.4 // indirect 80 | github.com/pion/stun v0.6.1 // indirect 81 | github.com/pion/stun/v3 v3.0.0 // indirect 82 | github.com/pion/transport/v2 v2.2.10 // indirect 83 | github.com/pion/transport/v3 v3.0.7 // indirect 84 | github.com/pion/turn/v4 v4.0.0 // indirect 85 | github.com/pion/webrtc/v4 v4.0.10 // indirect 86 | github.com/pkg/errors v0.9.1 // indirect 87 | github.com/prometheus/client_golang v1.21.0 // indirect 88 | github.com/prometheus/client_model v0.6.1 // indirect 89 | github.com/prometheus/common v0.62.0 // indirect 90 | github.com/prometheus/procfs v0.15.1 // indirect 91 | github.com/quic-go/qpack v0.5.1 // indirect 92 | github.com/quic-go/quic-go v0.50.0 // indirect 93 | github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect 94 | github.com/raulk/go-watchdog v1.3.0 // indirect 95 | github.com/spaolacci/murmur3 v1.1.0 // indirect 96 | github.com/wlynxg/anet v0.0.5 // indirect 97 | go.uber.org/dig v1.18.0 // indirect 98 | go.uber.org/fx v1.23.0 // indirect 99 | go.uber.org/mock v0.5.0 // indirect 100 | go.uber.org/multierr v1.11.0 // indirect 101 | go.uber.org/zap v1.27.0 // indirect 102 | golang.org/x/crypto v0.35.0 // indirect 103 | golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa // indirect 104 | golang.org/x/mod v0.23.0 // indirect 105 | golang.org/x/net v0.35.0 // indirect 106 | golang.org/x/sync v0.11.0 // indirect 107 | golang.org/x/sys v0.30.0 // indirect 108 | golang.org/x/text v0.22.0 // indirect 109 | golang.org/x/tools v0.30.0 // indirect 110 | google.golang.org/protobuf v1.36.5 // indirect 111 | lukechampine.com/blake3 v1.4.0 // indirect 112 | ) 113 | -------------------------------------------------------------------------------- /perf/impl/go-libp2p/v0.41/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "flag" 7 | "fmt" 8 | "net" 9 | "time" 10 | 11 | "github.com/libp2p/go-libp2p" 12 | "github.com/libp2p/go-libp2p/core/crypto" 13 | "github.com/libp2p/go-libp2p/core/network" 14 | "github.com/libp2p/go-libp2p/core/peer" 15 | "github.com/multiformats/go-multiaddr" 16 | ) 17 | 18 | func main() { 19 | runServer := flag.Bool("run-server", false, "Should run as server") 20 | serverAddr := flag.String("server-address", "", "Server address") 21 | transport := flag.String("transport", "tcp", "Transport to use") 22 | uploadBytes := flag.Uint64("upload-bytes", 0, "Upload bytes") 23 | downloadBytes := flag.Uint64("download-bytes", 0, "Download bytes") 24 | flag.Parse() 25 | 26 | host, port, err := net.SplitHostPort(*serverAddr) 27 | if err != nil { 28 | log.Fatal(err) 29 | } 30 | 31 | tcpMultiAddrStr := fmt.Sprintf("/ip4/%s/tcp/%s", host, port) 32 | quicMultiAddrStr := fmt.Sprintf("/ip4/%s/udp/%s/quic-v1", host, port) 33 | 34 | var opts []libp2p.Option 35 | if 
*runServer { 36 | opts = append(opts, libp2p.ListenAddrStrings(tcpMultiAddrStr, quicMultiAddrStr)) 37 | // Generate stable fake identity. 38 | // 39 | // Using a stable identity (i.e. peer ID) allows the client to 40 | // connect to the server without a prior exchange of the 41 | // server's peer ID. 42 | priv, _, err := crypto.GenerateEd25519Key(&simpleReader{seed: 0}) 43 | if err != nil { 44 | log.Fatalf("failed to generate key: %s", err) 45 | } 46 | opts = append(opts, libp2p.Identity(priv)) 47 | } 48 | 49 | opts = append(opts, libp2p.ResourceManager(&network.NullResourceManager{})) 50 | 51 | h, err := libp2p.New(opts...) 52 | if err != nil { 53 | log.Fatalf("failed to instantiate libp2p: %s", err) 54 | } 55 | 56 | perf := NewPerfService(h) 57 | if *runServer { 58 | for _, a := range h.Addrs() { 59 | fmt.Println(a.Encapsulate(multiaddr.StringCast("/p2p/" + h.ID().String()))) 60 | } 61 | 62 | select {} // run forever, exit on interrupt 63 | } 64 | 65 | var multiAddrStr string 66 | switch *transport { 67 | case "tcp": 68 | multiAddrStr = tcpMultiAddrStr 69 | case "quic-v1": 70 | multiAddrStr = quicMultiAddrStr 71 | default: 72 | fmt.Println("Invalid transport. Accepted values: 'tcp' or 'quic-v1'") 73 | return 74 | } 75 | // Peer ID corresponds to the above fake identity. 76 | multiAddrStr = multiAddrStr + "/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" 77 | serverInfo, err := peer.AddrInfoFromString(multiAddrStr) 78 | if err != nil { 79 | log.Fatalf("failed to build address info: %s", err) 80 | } 81 | 82 | start := time.Now() 83 | err = h.Connect(context.Background(), *serverInfo) 84 | if err != nil { 85 | log.Fatalf("failed to dial peer: %s", err) 86 | } 87 | 88 | err = perf.RunPerf(context.Background(), serverInfo.ID, uint64(*uploadBytes), uint64(*downloadBytes)) 89 | if err != nil { 90 | log.Fatalf("failed to execute perf: %s", err) 91 | } 92 | 93 | jsonB, err := json.Marshal(Result{ 94 | TimeSeconds: time.Since(start).Seconds(), 95 | UploadBytes: *uploadBytes, 96 | DownloadBytes: *downloadBytes, 97 | Type: "final", 98 | }) 99 | if err != nil { 100 | log.Fatalf("failed to marshal perf result: %s", err) 101 | } 102 | 103 | fmt.Println(string(jsonB)) 104 | } 105 | 106 | type Result struct { 107 | Type string `json:"type"` 108 | TimeSeconds float64 `json:"timeSeconds"` 109 | UploadBytes uint64 `json:"uploadBytes"` 110 | DownloadBytes uint64 `json:"downloadBytes"` 111 | } 112 | 113 | type simpleReader struct { 114 | seed uint8 115 | } 116 | 117 | func (r *simpleReader) Read(p []byte) (n int, err error) { 118 | for i := range p { 119 | p[i] = r.seed 120 | } 121 | return len(p), nil 122 | } 123 | -------------------------------------------------------------------------------- /perf/impl/go-libp2p/v0.41/perf.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/binary" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "time" 10 | 11 | logging "github.com/ipfs/go-log/v2" 12 | pool "github.com/libp2p/go-buffer-pool" 13 | "github.com/libp2p/go-libp2p/core/host" 14 | "github.com/libp2p/go-libp2p/core/network" 15 | "github.com/libp2p/go-libp2p/core/peer" 16 | ) 17 | 18 | var log = logging.Logger("perf") 19 | 20 | const ( 21 | ID = "/perf/1.0.0" 22 | blockSize = 64 << 10 23 | ) 24 | 25 | type PerfService struct { 26 | Host host.Host 27 | } 28 | 29 | func NewPerfService(h host.Host) *PerfService { 30 | ps := &PerfService{h} 31 | h.SetStreamHandler(ID, ps.PerfHandler) 32 | return ps 33 | } 34 | 35 | func 
(ps *PerfService) PerfHandler(s network.Stream) { 36 | u64Buf := make([]byte, 8) 37 | if _, err := io.ReadFull(s, u64Buf); err != nil { 38 | log.Errorw("err", err) 39 | s.Reset() 40 | return 41 | } 42 | 43 | bytesToSend := binary.BigEndian.Uint64(u64Buf) 44 | 45 | if _, err := drainStream(s); err != nil { 46 | log.Errorw("err", err) 47 | s.Reset() 48 | return 49 | } 50 | 51 | if err := sendBytes(s, bytesToSend); err != nil { 52 | log.Errorw("err", err) 53 | s.Reset() 54 | return 55 | } 56 | s.CloseWrite() 57 | } 58 | 59 | func (ps *PerfService) RunPerf(ctx context.Context, p peer.ID, bytesToSend uint64, bytesToRecv uint64) error { 60 | s, err := ps.Host.NewStream(ctx, p, ID) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | sizeBuf := make([]byte, 8) 66 | binary.BigEndian.PutUint64(sizeBuf, bytesToRecv) 67 | 68 | _, err = s.Write(sizeBuf) 69 | if err != nil { 70 | return err 71 | } 72 | 73 | if err := sendBytes(s, bytesToSend); err != nil { 74 | return err 75 | } 76 | s.CloseWrite() 77 | 78 | recvd, err := drainStream(s) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | if recvd != bytesToRecv { 84 | return fmt.Errorf("expected to recv %d bytes, got %d", bytesToRecv, recvd) 85 | } 86 | 87 | return nil 88 | } 89 | 90 | func sendBytes(s io.Writer, bytesToSend uint64) error { 91 | buf := pool.Get(blockSize) 92 | defer pool.Put(buf) 93 | 94 | lastReportTime := time.Now() 95 | lastReportWrite := uint64(0) 96 | 97 | for bytesToSend > 0 { 98 | now := time.Now() 99 | if now.Sub(lastReportTime) >= time.Second { 100 | jsonB, err := json.Marshal(Result{ 101 | TimeSeconds: now.Sub(lastReportTime).Seconds(), 102 | UploadBytes: lastReportWrite, 103 | Type: "intermediary", 104 | }) 105 | if err != nil { 106 | log.Fatalf("failed to marshal perf result: %s", err) 107 | } 108 | fmt.Println(string(jsonB)) 109 | 110 | lastReportTime = now 111 | lastReportWrite = 0 112 | } 113 | 114 | toSend := buf 115 | if bytesToSend < blockSize { 116 | toSend = buf[:bytesToSend] 117 | } 118 | 119 | n, err := s.Write(toSend) 120 | if err != nil { 121 | return err 122 | } 123 | bytesToSend -= uint64(n) 124 | lastReportWrite += uint64(n) 125 | } 126 | return nil 127 | } 128 | 129 | func drainStream(s io.Reader) (uint64, error) { 130 | var recvd int64 131 | recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()}) 132 | if err != nil && err != io.EOF { 133 | return uint64(recvd), err 134 | } 135 | return uint64(recvd), nil 136 | } 137 | 138 | type reportingReader struct { 139 | orig io.Reader 140 | LastReportTime time.Time 141 | lastReportRead uint64 142 | } 143 | 144 | var _ io.Reader = &reportingReader{} 145 | 146 | func (r *reportingReader) Read(b []byte) (int, error) { 147 | n, err := r.orig.Read(b) 148 | r.lastReportRead += uint64(n) 149 | 150 | now := time.Now() 151 | if now.Sub(r.LastReportTime) >= time.Second { 152 | result := Result{ 153 | TimeSeconds: now.Sub(r.LastReportTime).Seconds(), 154 | Type: "intermediary", 155 | DownloadBytes: r.lastReportRead, 156 | } 157 | 158 | jsonB, err := json.Marshal(result) 159 | if err != nil { 160 | log.Fatalf("failed to marshal perf result: %s", err) 161 | } 162 | fmt.Println(string(jsonB)) 163 | 164 | r.LastReportTime = now 165 | r.lastReportRead = 0 166 | } 167 | 168 | return n, err 169 | } 170 | -------------------------------------------------------------------------------- /perf/impl/https/.gitignore: -------------------------------------------------------------------------------- 1 | go-libp2p-*.zip 2 | go-libp2p-* 3 | go-libp2p-*/* 4 | 
image.json 5 | https 6 | -------------------------------------------------------------------------------- /perf/impl/https/v0.1/Makefile: -------------------------------------------------------------------------------- 1 | GO_FILES := $(wildcard *.go) 2 | 3 | all: perf 4 | 5 | perf: $(GO_FILES) 6 | docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)":/usr/src/myapp -w /usr/src/myapp -e GOCACHE=/usr/src/myapp/.cache golang:1.20 go build -o perf . 7 | 8 | clean: 9 | rm perf 10 | 11 | .PHONY: all 12 | -------------------------------------------------------------------------------- /perf/impl/https/v0.1/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/libp2p/test-plans/perf/go/https 2 | 3 | go 1.20 4 | -------------------------------------------------------------------------------- /perf/impl/https/v0.1/go.sum: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/libp2p/test-plans/b59a6981542f1ab4e70de9f2e4eea6d132b1c8bf/perf/impl/https/v0.1/go.sum -------------------------------------------------------------------------------- /perf/impl/js-libp2p/.gitignore: -------------------------------------------------------------------------------- 1 | js-libp2p-*.zip 2 | js-libp2p-* 3 | js-libp2p-*/* 4 | -------------------------------------------------------------------------------- /perf/impl/js-libp2p/v2.8/Makefile: -------------------------------------------------------------------------------- 1 | DOCKER_IMAGE := node:22-alpine 2 | DOCKER_RUN := docker run --rm -v "$(shell pwd)":/usr/src/myapp -w /usr/src/myapp $(DOCKER_IMAGE) 3 | 4 | all: perf 5 | 6 | perf: 7 | $(DOCKER_RUN) npm ci 8 | 9 | clean: 10 | rm -rf node_modules 11 | 12 | .PHONY: all clean perf 13 | -------------------------------------------------------------------------------- /perf/impl/js-libp2p/v2.8/index.js: -------------------------------------------------------------------------------- 1 | import { parseArgs } from 'node:util' 2 | import { noise } from '@chainsafe/libp2p-noise' 3 | import { yamux } from '@chainsafe/libp2p-yamux' 4 | import { perf } from '@libp2p/perf' 5 | import { tcp } from '@libp2p/tcp' 6 | import { multiaddr } from '@multiformats/multiaddr' 7 | import { createLibp2p } from 'libp2p' 8 | 9 | const argv = parseArgs({ 10 | options: { 11 | 'run-server': { 12 | type: 'string', 13 | default: 'false' 14 | }, 15 | 'server-address': { 16 | type: 'string' 17 | }, 18 | transport: { 19 | type: 'string', 20 | default: 'tcp' 21 | }, 22 | 'upload-bytes': { 23 | type: 'string', 24 | default: '0' 25 | }, 26 | 'download-bytes': { 27 | type: 'string', 28 | default: '0' 29 | } 30 | } 31 | }) 32 | 33 | /** 34 | * @param {boolean} runServer 35 | * @param {string} serverAddress 36 | * @param {string} transport 37 | * @param {number} uploadBytes 38 | * @param {number} downloadBytes 39 | */ 40 | export async function main (runServer, serverAddress, transport, uploadBytes, downloadBytes) { 41 | const { host, port } = splitHostPort(serverAddress) 42 | 43 | const config = { 44 | transports: [ 45 | tcp() 46 | ], 47 | streamMuxers: [ 48 | yamux() 49 | ], 50 | connectionEncrypters: [ 51 | noise() 52 | ], 53 | services: { 54 | perf: perf() 55 | } 56 | } 57 | 58 | if (runServer) { 59 | Object.assign(config, { 60 | addresses: { 61 | listen: [ 62 | // #TODO: right now we only support tcp 63 | `/ip4/${host}/tcp/${port}` 64 | ] 65 | } 66 | }) 67 | } 68 | 69 | const node = await createLibp2p(config) 70 | 
71 | await node.start() 72 | 73 | if (!runServer) { 74 | for await (const output of node.services.perf.measurePerformance(multiaddr(`/ip4/${host}/tcp/${port}`), uploadBytes, downloadBytes)) { 75 | // eslint-disable-next-line no-console 76 | console.log(JSON.stringify(output)) 77 | } 78 | 79 | await node.stop() 80 | } 81 | } 82 | 83 | /** 84 | * @param {string} address 85 | * @returns {{ host: string, port?: string }} 86 | */ 87 | function splitHostPort (address) { 88 | try { 89 | const parts = address.split(':') 90 | const host = parts[0] 91 | const port = parts[1] 92 | return { 93 | host, 94 | port 95 | } 96 | } catch (error) { 97 | throw Error('Invalid server address') 98 | } 99 | } 100 | 101 | main(argv.values['run-server'] === 'true', argv.values['server-address'], argv.values.transport, Number(argv.values['upload-bytes']), Number(argv.values['download-bytes'])).catch((err) => { 102 | // eslint-disable-next-line no-console 103 | console.error(err) 104 | process.exit(1) 105 | }) 106 | -------------------------------------------------------------------------------- /perf/impl/js-libp2p/v2.8/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@libp2p/perf-js-libp2p-2-8", 3 | "private": true, 4 | "main": "index.js", 5 | "type": "module", 6 | "dependencies": { 7 | "@chainsafe/libp2p-noise": "^16.0.3", 8 | "@chainsafe/libp2p-yamux": "^7.0.1", 9 | "@libp2p/perf": "^4.0.30", 10 | "@libp2p/tcp": "^10.1.6", 11 | "@multiformats/multiaddr": "^12.4.0", 12 | "libp2p": "^2.8.0" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /perf/impl/js-libp2p/v2.8/perf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # In case this script is `kill`ed, `kill` its child process, namely the `node` 4 | # process below. 5 | cleanup() { 6 | kill $node_pid 7 | } 8 | trap cleanup EXIT TERM 9 | 10 | # Find the path to the Node.js executable 11 | node_path=$(which node) 12 | 13 | run_server=false 14 | server_address="" 15 | upload_bytes=0 16 | download_bytes=0 17 | transport="" 18 | 19 | # Parse named parameters manually 20 | for ((i = 1; i <= $#; i++)); do 21 | if [ "${!i}" == "--server-address" ]; then 22 | server_address="${@:i+1:1}" 23 | fi 24 | if [ "${!i}" == "--upload-bytes" ]; then 25 | upload_bytes="${@:i+1:1}" 26 | fi 27 | if [ "${!i}" == "--download-bytes" ]; then 28 | download_bytes="${@:i+1:1}" 29 | fi 30 | if [ "${!i}" == "--transport" ]; then 31 | transport="${@:i+1:1}" 32 | fi 33 | if [ "${!i}" == "--run-server" ]; then 34 | run_server=true 35 | fi 36 | done 37 | 38 | # Run perf 39 | node $(dirname "$0")/index.js --run-server=$run_server --server-address=$server_address --upload-bytes=$upload_bytes --download-bytes=$download_bytes --transport=$transport & 40 | 41 | node_pid=$! 42 | 43 | # Wait for `node_pid` to finish, or for it to be `kill`ed by the above
45 | wait $node_pid 46 | -------------------------------------------------------------------------------- /perf/impl/quic-go/.gitignore: -------------------------------------------------------------------------------- 1 | go-libp2p-*.zip 2 | go-libp2p-* 3 | go-libp2p-*/* 4 | image.json 5 | -------------------------------------------------------------------------------- /perf/impl/quic-go/v0.45/Makefile: -------------------------------------------------------------------------------- 1 | commitSha := f059836be63477c4386450682b8f0ed7bc218d8e 2 | 3 | all: perf 4 | 5 | perf: perf-${commitSha} 6 | docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)/perf-${commitSha}":/usr/src/myapp -w /usr/src/myapp -e GOCACHE=/usr/src/myapp/.cache golang:1.23 go build -o perf cmd/main.go 7 | cp perf-${commitSha}/perf . 8 | 9 | perf-${commitSha}: perf-${commitSha}.zip 10 | unzip -o perf-${commitSha}.zip 11 | 12 | perf-${commitSha}.zip: 13 | # TODO: revert 14 | wget -O $@ "https://github.com/quic-go/perf/archive/${commitSha}.zip" 15 | 16 | clean: 17 | rm perf-*.zip 18 | rm -rf perf-* 19 | rm perf 20 | rm .cache 21 | rm v0.27 22 | 23 | .PHONY: all clean 24 | -------------------------------------------------------------------------------- /perf/impl/rust-libp2p/.gitignore: -------------------------------------------------------------------------------- 1 | rust-libp2p-*.zip 2 | rust-libp2p-* 3 | rust-libp2p-*/* 4 | image.json 5 | -------------------------------------------------------------------------------- /perf/impl/rust-libp2p/v0.55/Makefile: -------------------------------------------------------------------------------- 1 | commitSha := 9698607c72ea7b8f5e661963ab0e24384aef1e4e 2 | 3 | all: perf 4 | 5 | perf: rust-libp2p-${commitSha}/target/release/perf 6 | cp ./rust-libp2p-${commitSha}/target/release/perf . 
7 | 8 | rust-libp2p-${commitSha}/target/release/perf: rust-libp2p-${commitSha} 9 | docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)/rust-libp2p-${commitSha}":/usr/src/myapp -w /usr/src/myapp rust:1.83 cargo build --release --bin perf 10 | 11 | rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip 12 | unzip -o rust-libp2p-${commitSha}.zip 13 | 14 | rust-libp2p-${commitSha}.zip: 15 | wget -O $@ "https://github.com/libp2p/rust-libp2p/archive/${commitSha}.zip" 16 | 17 | clean: 18 | rm rust-libp2p-*.zip 19 | rm -rf rust-libp2p-* 20 | rm perf 21 | 22 | .PHONY: all clean 23 | -------------------------------------------------------------------------------- /perf/runner/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore built ts files 2 | dist/**/* 3 | -------------------------------------------------------------------------------- /perf/runner/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "runner", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "start": "ts-node src/index.ts" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "devDependencies": { 12 | "@types/node": "^18.15.11", 13 | "@types/yargs": "^17.0.24", 14 | "ts-node": "^10.9.1", 15 | "typescript": "^5.0.4" 16 | }, 17 | "dependencies": { 18 | "yargs": "^17.7.1" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /perf/runner/src/benchmark-result-type.ts: -------------------------------------------------------------------------------- 1 | export type BenchmarkResults = { 2 | benchmarks: Benchmark[], 3 | pings: PingResults, 4 | iperf: IperfResults, 5 | // For referencing this schema in JSON 6 | "$schema"?: string 7 | }; 8 | 9 | export type PingResults = { 10 | unit: "s", 11 | results: number[] 12 | }; 13 | 14 | export type IperfResults = { 15 | unit: "bit/s", 16 | results: number[] 17 | }; 18 | 19 | export type Benchmark = { 20 | name: string, 21 | unit: "bit/s" | "s", 22 | results: Result[], 23 | parameters: Parameters 24 | } 25 | 26 | export type Parameters = { 27 | uploadBytes: number, 28 | downloadBytes: number, 29 | } 30 | 31 | export type Result = { 32 | implementation: string, 33 | transportStack: string, 34 | version: string, 35 | result: ResultValue[], 36 | }; 37 | 38 | export type ResultValue = { 39 | type: "intermediary" | "final", 40 | time_seconds: number, 41 | upload_bytes: number, 42 | download_bytes: number, 43 | }; 44 | 45 | export type Comparison = { 46 | name: string, 47 | result: number, 48 | } 49 | -------------------------------------------------------------------------------- /perf/runner/src/versions.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs'; 2 | import path from 'path'; 3 | 4 | export type Version = { 5 | id: string, 6 | implementation: "go-libp2p" | "js-libp2p" | "nim-libp2p" | "rust-libp2p" | "zig-libp2p" | "https" | "quic-go", 7 | transportStacks: string[], 8 | } 9 | 10 | export const versions: Array<Version> = JSON.parse(fs.readFileSync(path.join(__dirname, '../versionsInput.json'), 'utf8')); 11 | -------------------------------------------------------------------------------- /perf/runner/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2018", 4 | "module": "commonjs", 5 | "outDir": "./dist", 6 | "strict": true, 7 | "resolveJsonModule":
true, 8 | "esModuleInterop": true 9 | }, 10 | "include": [ 11 | "./src/**/*.ts" 12 | ], 13 | "exclude": [ 14 | "node_modules" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /perf/runner/versionsInput.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "v0.45", 4 | "implementation": "quic-go", 5 | "transportStacks": [ 6 | "quic-v1" 7 | ] 8 | }, 9 | { 10 | "id": "v0.55", 11 | "implementation": "rust-libp2p", 12 | "transportStacks": [ 13 | "tcp", 14 | "quic-v1" 15 | ] 16 | }, 17 | { 18 | "id": "v0.1", 19 | "implementation": "https", 20 | "transportStacks": [ 21 | "tcp" 22 | ] 23 | }, 24 | { 25 | "id": "v0.41", 26 | "implementation": "go-libp2p", 27 | "transportStacks": [ 28 | "tcp", 29 | "quic-v1" 30 | ] 31 | }, 32 | { 33 | "id": "v2.8", 34 | "implementation": "js-libp2p", 35 | "transportStacks": [ 36 | "tcp" 37 | ] 38 | } 39 | ] 40 | -------------------------------------------------------------------------------- /perf/terraform/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # passwords, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | !configs/remote/terraform_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | -------------------------------------------------------------------------------- /perf/terraform/configs/README.md: -------------------------------------------------------------------------------- 1 | # Configs 2 | 3 | The terraform configs defined in this directory are used to provision the infrastructure for the libp2p perf tests. 4 | 5 | The configs are named after the type of backend they use. The defaults for what parts of infrastructure they provision differ between the two. 6 | 7 | ## local 8 | 9 | Terraform state in this configuration will be stored locally. The defaults are configured for a single performance benchmark run, i.e. `terraform apply` will bring up short-lived infrastructure only. It will skip long-lived infrastructure like the clean-up Lambda and the instance launch template. 10 | 11 | ## remote 12 | 13 | Terraform state here will be stored remotely in an S3 bucket. `terraform apply` will only bring up the long-lived infrastructure needed to run the performance benchmarks. It will skip short-lived infrastructure like launching EC2 instances. 
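The local config exposes `client_ip` and `server_ip` outputs (see `terraform.tf` below), which is how a driver script finds the freshly provisioned machines. As a minimal, purely illustrative sketch — not the repo's actual runner logic — a Node/TypeScript script could read them via `terraform output -json` when run against the config directory:

```ts
import { execFileSync } from 'node:child_process'

// Shape of `terraform output -json` for the outputs defined in configs/local/terraform.tf.
// Each value is null when short_lived_enabled = false.
type TfOutputs = {
  client_ip: { value: string | null }
  server_ip: { value: string | null }
}

// Hypothetical helper: invoke the Terraform CLI and parse its JSON output.
function readPerfIps (configDir: string): { client: string, server: string } {
  const raw = execFileSync('terraform', ['output', '-json'], { cwd: configDir, encoding: 'utf8' })
  const outputs: TfOutputs = JSON.parse(raw)
  if (outputs.client_ip.value == null || outputs.server_ip.value == null) {
    throw new Error('short-lived instances not provisioned (is short_lived_enabled = false?)')
  }
  return { client: outputs.client_ip.value, server: outputs.server_ip.value }
}

// Example usage:
// const { client, server } = readPerfIps('perf/terraform/configs/local')
```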
14 | -------------------------------------------------------------------------------- /perf/terraform/configs/local/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/archive" { 5 | version = "2.3.0" 6 | hashes = [ 7 | "h1:NaDbOqAcA9d8DiAS5/6+5smXwN3/+twJGb3QRiz6pNw=", 8 | "h1:OmE1tPjiST8iQp6fC0N3Xzur+q2RvgvD7Lz0TpKSRBw=", 9 | "zh:0869128d13abe12b297b0cd13b8767f10d6bf047f5afc4215615aabc39c2eb4f", 10 | "zh:481ed837d63ba3aa45dd8736da83e911e3509dee0e7961bf5c00ed2644f807b3", 11 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 12 | "zh:9f08fe2977e2166849be24fb9f394e4d2697414d463f7996fd0d7beb4e19a29c", 13 | "zh:9fe566deeafd460d27999ca0bbfd85426a5fcfcb40007b23884deb76da127b6f", 14 | "zh:a1bd9a60925d9769e0da322e4523330ee86af9dc2e770cba1d0247a999ef29cb", 15 | "zh:bb4094c8149f74308b22a87e1ac19bcccca76e8ef021b571074d9bccf1c0c6f0", 16 | "zh:c8984c9def239041ce41ec8e19bbd76a49e74ed2024ff736dad60429dee89bcc", 17 | "zh:ea4bb5ae73db1de3a586e62f39106f5e56770804a55aa5e6b4f642df973e0e75", 18 | "zh:f44a9d596ecc3a8c5653f56ba0cd202ad93b49f76767f4608daf7260b813289e", 19 | "zh:f5c5e6cc9f7f070020ab7d95fcc9ed8e20d5cf219978295a71236e22cbb6d508", 20 | "zh:fd2273f51dcc8f43403bf1e425ba9db08a57c3ddcba5ad7a51742ccde21ca611", 21 | ] 22 | } 23 | 24 | provider "registry.terraform.io/hashicorp/aws" { 25 | version = "4.67.0" 26 | constraints = "4.67.0" 27 | hashes = [ 28 | "h1:5Zfo3GfRSWBaXs4TGQNOflr1XaYj6pRnVJLX5VAjFX4=", 29 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 30 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 31 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 32 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 33 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 34 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 35 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 36 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 37 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 38 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 39 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 40 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 41 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 42 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 43 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 44 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 45 | ] 46 | } 47 | -------------------------------------------------------------------------------- /perf/terraform/configs/local/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.67.0" 6 | } 7 | } 8 | } 9 | 10 | locals { 11 | tags = { 12 | Project = "perf" 13 | } 14 | } 15 | 16 | provider "aws" { 17 | alias = "us-west-2" 18 | region = "us-west-2" 19 | default_tags { 20 | tags = local.tags 21 | } 22 | } 23 | 24 | provider "aws" { 25 | alias = "us-east-1" 26 | region = "us-east-1" 27 | default_tags { 28 | tags = local.tags 29 | } 30 | } 31 | 32 | 33 | variable 
"ci_enabled" { 34 | type = bool 35 | description = "Whether or not to create resources required to automate the setup in CI (e.g. IAM user, cleanup Lambda)" 36 | default = false 37 | } 38 | 39 | variable "long_lived_enabled" { 40 | type = bool 41 | description = "Whether or not to create long lived resources (in CI, used across runs; e.g. VPCs)" 42 | default = false 43 | } 44 | 45 | variable "short_lived_enabled" { 46 | type = bool 47 | description = "Whether or not to create short lived resources (in CI, specific to each run; e.g. EC2 instances)" 48 | default = true 49 | } 50 | 51 | module "ci" { 52 | count = var.ci_enabled ? 1 : 0 53 | 54 | source = "../../modules/ci" 55 | 56 | regions = ["us-west-2", "us-east-1"] 57 | tags = local.tags 58 | 59 | providers = { 60 | aws = aws.us-west-2 61 | } 62 | } 63 | 64 | module "long_lived_server" { 65 | count = var.long_lived_enabled ? 1 : 0 66 | 67 | source = "../../modules/long_lived" 68 | 69 | region = "us-west-2" 70 | ami = "ami-002829755fa238bfa" 71 | 72 | providers = { 73 | aws = aws.us-west-2 74 | } 75 | } 76 | 77 | module "long_lived_client" { 78 | count = var.long_lived_enabled ? 1 : 0 79 | 80 | source = "../../modules/long_lived" 81 | 82 | region = "us-east-1" 83 | ami = "ami-051f7e7f6c2f40dc1" 84 | 85 | providers = { 86 | aws = aws.us-east-1 87 | } 88 | } 89 | 90 | module "short_lived_server" { 91 | count = var.short_lived_enabled ? 1 : 0 92 | 93 | source = "../../modules/short_lived" 94 | 95 | providers = { 96 | aws = aws.us-west-2 97 | } 98 | 99 | depends_on = [module.long_lived_server] 100 | } 101 | 102 | module "short_lived_client" { 103 | count = var.short_lived_enabled ? 1 : 0 104 | 105 | source = "../../modules/short_lived" 106 | 107 | providers = { 108 | aws = aws.us-east-1 109 | } 110 | 111 | depends_on = [module.long_lived_client] 112 | } 113 | 114 | output "client_ip" { 115 | value = var.short_lived_enabled ? module.short_lived_client[0].public_ip : null 116 | } 117 | 118 | output "server_ip" { 119 | value = var.short_lived_enabled ? module.short_lived_server[0].public_ip : null 120 | } 121 | 122 | -------------------------------------------------------------------------------- /perf/terraform/configs/remote/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/archive" { 5 | version = "2.3.0" 6 | hashes = [ 7 | "h1:NaDbOqAcA9d8DiAS5/6+5smXwN3/+twJGb3QRiz6pNw=", 8 | "h1:OmE1tPjiST8iQp6fC0N3Xzur+q2RvgvD7Lz0TpKSRBw=", 9 | "zh:0869128d13abe12b297b0cd13b8767f10d6bf047f5afc4215615aabc39c2eb4f", 10 | "zh:481ed837d63ba3aa45dd8736da83e911e3509dee0e7961bf5c00ed2644f807b3", 11 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 12 | "zh:9f08fe2977e2166849be24fb9f394e4d2697414d463f7996fd0d7beb4e19a29c", 13 | "zh:9fe566deeafd460d27999ca0bbfd85426a5fcfcb40007b23884deb76da127b6f", 14 | "zh:a1bd9a60925d9769e0da322e4523330ee86af9dc2e770cba1d0247a999ef29cb", 15 | "zh:bb4094c8149f74308b22a87e1ac19bcccca76e8ef021b571074d9bccf1c0c6f0", 16 | "zh:c8984c9def239041ce41ec8e19bbd76a49e74ed2024ff736dad60429dee89bcc", 17 | "zh:ea4bb5ae73db1de3a586e62f39106f5e56770804a55aa5e6b4f642df973e0e75", 18 | "zh:f44a9d596ecc3a8c5653f56ba0cd202ad93b49f76767f4608daf7260b813289e", 19 | "zh:f5c5e6cc9f7f070020ab7d95fcc9ed8e20d5cf219978295a71236e22cbb6d508", 20 | "zh:fd2273f51dcc8f43403bf1e425ba9db08a57c3ddcba5ad7a51742ccde21ca611", 21 | ] 22 | } 23 | 24 | provider "registry.terraform.io/hashicorp/aws" { 25 | version = "4.67.0" 26 | constraints = "4.67.0" 27 | hashes = [ 28 | "h1:5Zfo3GfRSWBaXs4TGQNOflr1XaYj6pRnVJLX5VAjFX4=", 29 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 30 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 31 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 32 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 33 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 34 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 35 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 36 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 37 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 38 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 39 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 40 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 41 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 42 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 43 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 44 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 45 | ] 46 | } 47 | -------------------------------------------------------------------------------- /perf/terraform/configs/remote/terraform.tf: -------------------------------------------------------------------------------- 1 | ../local/terraform.tf -------------------------------------------------------------------------------- /perf/terraform/configs/remote/terraform_override.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "libp2p-terraform-state" 4 | key = "github.com/libp2p/test-plans/perf/terraform/configs/remote/terraform.tfstate" 5 | region = "us-west-2" 6 | } 7 | } 8 | 9 | variable "ci_enabled" { 10 | default = true 11 | } 12 | 13 | variable "long_lived_enabled" { 14 | default = true 15 | } 16 | 17 | variable "short_lived_enabled" { 18 | default = false 19 | } 20 | -------------------------------------------------------------------------------- /perf/terraform/modules/ci/cleanup.tf: 
-------------------------------------------------------------------------------- 1 | data "archive_file" "cleanup" { 2 | type = "zip" 3 | source_file = "${path.module}/files/cleanup.py" 4 | output_path = "${path.module}/files/cleanup.zip" 5 | } 6 | 7 | resource "aws_lambda_function" "cleanup" { 8 | filename = data.archive_file.cleanup.output_path 9 | source_code_hash = data.archive_file.cleanup.output_base64sha256 10 | function_name = "perf-cleanup" 11 | role = aws_iam_role.cleanup.arn 12 | handler = "cleanup.lambda_handler" 13 | runtime = "python3.9" 14 | memory_size = 128 15 | timeout = 30 16 | 17 | environment { 18 | variables = { 19 | REGIONS = jsonencode(var.regions) 20 | TAGS = jsonencode(var.tags) 21 | MAX_AGE_MINUTES = 360 22 | } 23 | } 24 | } 25 | 26 | resource "aws_cloudwatch_log_group" "cleanup" { 27 | name = "/aws/lambda/${aws_lambda_function.cleanup.function_name}" 28 | retention_in_days = 7 29 | } 30 | 31 | resource "aws_cloudwatch_event_rule" "cleanup" { 32 | name = "perf-cleanup-rule" 33 | schedule_expression = "cron(37 * * * ? *)" # 00:37, 01:37, 02:37, ..., 23:37 34 | } 35 | 36 | resource "aws_cloudwatch_event_target" "cleanup" { 37 | rule = aws_cloudwatch_event_rule.cleanup.name 38 | arn = aws_lambda_function.cleanup.arn 39 | } 40 | 41 | resource "aws_lambda_permission" "cleanup" { 42 | statement_id = "AllowExecutionFromCloudWatch" 43 | action = "lambda:InvokeFunction" 44 | function_name = aws_lambda_function.cleanup.function_name 45 | principal = "events.amazonaws.com" 46 | source_arn = aws_cloudwatch_event_rule.cleanup.arn 47 | } 48 | 49 | data "aws_iam_policy_document" "cleanup_assume_role" { 50 | statement { 51 | actions = ["sts:AssumeRole"] 52 | 53 | principals { 54 | type = "Service" 55 | identifiers = ["lambda.amazonaws.com"] 56 | } 57 | } 58 | } 59 | 60 | resource "aws_iam_role" "cleanup" { 61 | name = "perf-cleanup-lambda-role" 62 | assume_role_policy = data.aws_iam_policy_document.cleanup_assume_role.json 63 | } 64 | 65 | data "aws_iam_policy_document" "cleanup" { 66 | statement { 67 | actions = ["ec2:DescribeInstances", "ec2:DescribeTags", "ec2:DescribeKeyPairs"] 68 | resources = ["*"] 69 | effect = "Allow" 70 | } 71 | 72 | statement { 73 | actions = ["ec2:TerminateInstances", "ec2:DeleteKeyPair"] 74 | resources = ["*"] 75 | effect = "Allow" 76 | 77 | dynamic "condition" { 78 | for_each = var.tags 79 | 80 | content { 81 | test = "StringEquals" 82 | variable = "ec2:ResourceTag/${condition.key}" 83 | values = [condition.value] 84 | } 85 | } 86 | } 87 | } 88 | 89 | resource "aws_iam_role_policy" "cleanup" { 90 | name = "perf-cleanup-lamda-policy" 91 | role = aws_iam_role.cleanup.name 92 | policy = data.aws_iam_policy_document.cleanup.json 93 | } 94 | 95 | data "aws_iam_policy_document" "cleanup_logging" { 96 | statement { 97 | actions = ["logs:CreateLogStream", "logs:PutLogEvents"] 98 | resources = ["${aws_cloudwatch_log_group.cleanup.arn}*"] 99 | effect = "Allow" 100 | } 101 | } 102 | 103 | resource "aws_iam_role_policy" "cleanup_logging" { 104 | name = "perf-lambda-logging" 105 | role = aws_iam_role.cleanup.name 106 | policy = data.aws_iam_policy_document.cleanup_logging.json 107 | } 108 | -------------------------------------------------------------------------------- /perf/terraform/modules/ci/files/.gitignore: -------------------------------------------------------------------------------- 1 | # generated ZIP for AWS Lambda 2 | cleanup.zip 3 | -------------------------------------------------------------------------------- 
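As a quick sanity check of the wiring above, the deployed function can also be invoked on demand. This is a hypothetical sketch assuming `@aws-sdk/client-lambda` and credentials permitted to call `lambda:InvokeFunction`; the repo's own test harness instead uses SAM (see `modules/ci/test/cleanup.sh` below):

```ts
import { LambdaClient, InvokeCommand } from '@aws-sdk/client-lambda'

// Hypothetical smoke test: invoke the "perf-cleanup" function defined in
// cleanup.tf above. The handler ignores the event body, so an empty object is fine.
async function invokeCleanup (): Promise<void> {
  const client = new LambdaClient({ region: 'us-west-2' })
  const res = await client.send(new InvokeCommand({
    FunctionName: 'perf-cleanup',
    Payload: Buffer.from(JSON.stringify({}))
  }))
  console.log('cleanup invoked, status:', res.StatusCode)
}

invokeCleanup().catch(err => { console.error(err); process.exit(1) })
```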
/perf/terraform/modules/ci/files/cleanup.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import os 3 | import json 4 | import datetime 5 | 6 | regions = json.loads(os.environ['REGIONS']) # Assuming this is a JSON array 7 | tags = json.loads(os.environ['TAGS']) # Assuming this is a JSON object 8 | max_age_minutes = int(os.environ['MAX_AGE_MINUTES']) # Assuming this is an integer 9 | 10 | # TODO: Find and delete unused key pairs 11 | def lambda_handler(event, context): 12 | # iterate over all regions 13 | for region in regions: 14 | ec2 = boto3.client('ec2', region_name=region) 15 | 16 | now = datetime.datetime.now(datetime.timezone.utc) 17 | 18 | filters = [{'Name': 'instance-state-name', 'Values': ['running']}] 19 | filters = filters + [{ 20 | 'Name': 'tag:' + k, 21 | 'Values': [v] 22 | } for k, v in tags.items()] 23 | 24 | response = ec2.describe_instances(Filters=filters) 25 | 26 | instances = [] 27 | 28 | for reservation in response['Reservations']: 29 | for instance in reservation['Instances']: 30 | launch_time = instance['LaunchTime'] 31 | instance_id = instance['InstanceId'] 32 | 33 | print( 34 | f'Instance ID: {instance_id} has been running since {launch_time}.') 35 | 36 | if launch_time < now - datetime.timedelta(minutes=max_age_minutes): 37 | print( 38 | f'Instance ID: {instance_id} has been running for more than {max_age_minutes} minutes.') 39 | instances.append(instance_id) 40 | 41 | if instances: 42 | ec2.terminate_instances(InstanceIds=instances) 43 | print(f'Terminating instances: {instances}') 44 | 45 | 46 | -------------------------------------------------------------------------------- /perf/terraform/modules/ci/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.67.0" 6 | } 7 | } 8 | } 9 | 10 | variable "tags" { 11 | type = map(string) 12 | description = "Tags that the perf resources are tagged with" 13 | } 14 | 15 | variable "regions" { 16 | type = list(string) 17 | description = "Regions that the perf resources are created in" 18 | } 19 | 20 | resource "aws_iam_user" "perf" { 21 | name = "perf" 22 | } 23 | 24 | # TODO: Make the policy more restrictive; it needs to be able to create/destroy instances and key pairs 25 | data "aws_iam_policy_document" "perf" { 26 | statement { 27 | actions = ["ec2:*"] 28 | resources = ["*"] 29 | effect = "Allow" 30 | } 31 | } 32 | 33 | resource "aws_iam_user_policy" "perf" { 34 | name = "perf" 35 | user = aws_iam_user.perf.name 36 | 37 | policy = data.aws_iam_policy_document.perf.json 38 | } 39 | -------------------------------------------------------------------------------- /perf/terraform/modules/ci/test/cleanup.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /perf/terraform/modules/ci/test/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script can be used to test the cleanup lambda. 4 | # It requires the AWS CLI and SAM CLI to be installed. 
5 | # You can get SAM CLI at https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html 6 | 7 | sam local invoke Cleanup --template cleanup.yml --event cleanup.json 8 | -------------------------------------------------------------------------------- /perf/terraform/modules/ci/test/cleanup.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: An AWS Lambda application. 4 | 5 | Resources: 6 | Cleanup: 7 | Type: AWS::Serverless::Function 8 | Properties: 9 | Handler: cleanup.lambda_handler 10 | Runtime: python3.9 11 | CodeUri: ../files 12 | Environment: 13 | Variables: 14 | REGIONS: '["us-west-2", "us-east-1"]' 15 | TAGS: '{"Project":"perf", "Name":"node"}' 16 | MAX_AGE_MINUTES: '360' 17 | Policies: 18 | - AmazonEC2FullAccess 19 | Timeout: 30 20 | -------------------------------------------------------------------------------- /perf/terraform/modules/long_lived/files/user-data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo yum install make -y 4 | 5 | sudo yum -y install iperf3 6 | 7 | # Bump UDP receive buffer size. See https://github.com/quic-go/quic-go/wiki/UDP-Receive-Buffer-Size. 8 | sudo sysctl -w net.core.rmem_max=2500000 9 | sudo sysctl -w net.core.wmem_max=2500000 10 | 11 | # Set maximum TCP send and receive window to bandwidth-delay-product. 12 | # 13 | # With a bandwidth of 25 Gbit/s per machine and a ping of 60 ms between the two 14 | # machines, the bandwidth-delay-product is ~178.81 MiB. Set send and receive 15 | # window to 200 MiB. 16 | sudo sysctl -w net.ipv4.tcp_rmem='4096 131072 200000000' 17 | sudo sysctl -w net.ipv4.tcp_wmem='4096 20480 200000000' 18 | 19 | sudo yum update -y 20 | sudo yum install docker -y 21 | sudo systemctl enable docker 22 | sudo systemctl start docker 23 | sudo usermod -aG docker ec2-user 24 | 25 | # Taken from https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/setting-up-node-on-ec2-instance.html 26 | # 27 | # Adapted to work with user-data according to https://repost.aws/questions/QUhS4f3j8jT6uW5OHAzi0-Wg/nodejs-not-installed-successfully-in-aws-ec2-inside-user-data 28 | sudo -u ec2-user sh -c 'curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash' 29 | sudo -u ec2-user sh -c '. 
~/.nvm/nvm.sh && nvm install --lts' 30 | -------------------------------------------------------------------------------- /perf/terraform/modules/long_lived/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.67.0" 6 | } 7 | } 8 | } 9 | 10 | variable "region" { 11 | description = "The AWS region of the provider" 12 | } 13 | 14 | variable "ami" { 15 | description = "The Amazon Machine Image to use" 16 | } 17 | 18 | locals { 19 | availability_zone = "${var.region}a" 20 | } 21 | 22 | resource "aws_vpc" "perf" { 23 | cidr_block = "10.0.0.0/16" 24 | } 25 | 26 | resource "aws_subnet" "perf" { 27 | vpc_id = aws_vpc.perf.id 28 | cidr_block = "10.0.0.0/16" 29 | availability_zone = local.availability_zone 30 | map_public_ip_on_launch = true 31 | } 32 | 33 | resource "aws_internet_gateway" "perf" { 34 | vpc_id = aws_vpc.perf.id 35 | } 36 | 37 | resource "aws_route_table" "perf" { 38 | vpc_id = aws_vpc.perf.id 39 | 40 | route { 41 | cidr_block = "0.0.0.0/0" 42 | gateway_id = aws_internet_gateway.perf.id 43 | } 44 | } 45 | 46 | resource "aws_route_table_association" "perf" { 47 | subnet_id = aws_subnet.perf.id 48 | route_table_id = aws_route_table.perf.id 49 | } 50 | 51 | resource "aws_security_group" "restricted_inbound" { 52 | name = "restricted_inbound" 53 | description = "Allow ICMP, SSH, and all TCP/UDP inbound traffic; allow all outbound traffic" 54 | vpc_id = aws_vpc.perf.id 55 | 56 | # ICMP 57 | ingress { 58 | from_port = -1 59 | to_port = -1 60 | protocol = "icmp" 61 | cidr_blocks = ["0.0.0.0/0"] 62 | } 63 | 64 | # SSH (TCP) 65 | ingress { 66 | from_port = 22 67 | to_port = 22 68 | protocol = "tcp" 69 | cidr_blocks = ["0.0.0.0/0"] 70 | } 71 | 72 | ingress { 73 | from_port = 1 74 | to_port = 65535 75 | protocol = "tcp" 76 | cidr_blocks = ["0.0.0.0/0"] 77 | } 78 | 79 | ingress { 80 | from_port = 1 81 | to_port = 65535 82 | protocol = "udp" 83 | cidr_blocks = ["0.0.0.0/0"] 84 | } 85 | 86 | egress { 87 | from_port = 0 88 | to_port = 0 89 | protocol = "-1" 90 | cidr_blocks = ["0.0.0.0/0"] 91 | } 92 | } 93 | 94 | resource "aws_launch_template" "perf" { 95 | name = "perf-node" 96 | image_id = var.ami 97 | instance_type = "m5.xlarge" 98 | 99 | # Debug via: 100 | # - /var/log/cloud-init.log and 101 | # - /var/log/cloud-init-output.log 102 | user_data = filebase64("${path.module}/files/user-data.sh") 103 | 104 | instance_initiated_shutdown_behavior = "terminate" 105 | 106 | network_interfaces { 107 | subnet_id = aws_subnet.perf.id 108 | security_groups = [aws_security_group.restricted_inbound.id] 109 | delete_on_termination = true 110 | } 111 | 112 | block_device_mappings { 113 | device_name = "/dev/xvda" 114 | ebs { 115 | volume_size = 100 # New root volume size in GiB 116 | volume_type = "gp2" 117 | delete_on_termination = true 118 | } 119 | } 120 | 121 | update_default_version = true 122 | } 123 | -------------------------------------------------------------------------------- /perf/terraform/modules/short_lived/files/.gitignore: -------------------------------------------------------------------------------- 1 | # generated SSH key 2 | perf 3 | perf.pub 4 | -------------------------------------------------------------------------------- /perf/terraform/modules/short_lived/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = 
"4.67.0" 6 | } 7 | } 8 | } 9 | 10 | resource "aws_key_pair" "perf" { 11 | key_name_prefix = "perf-" 12 | public_key = file("${path.module}/files/perf.pub") 13 | } 14 | 15 | resource "aws_instance" "perf" { 16 | tags = { 17 | Name = "perf-node" 18 | } 19 | 20 | launch_template { 21 | name = "perf-node" 22 | } 23 | 24 | key_name = aws_key_pair.perf.key_name 25 | } 26 | 27 | output "public_ip" { 28 | value = aws_instance.perf.public_ip 29 | } 30 | -------------------------------------------------------------------------------- /transport-interop/.gitignore: -------------------------------------------------------------------------------- 1 | # For now, not committing image.json files 2 | image.json 3 | 4 | results.csv 5 | # Logs from testground 6 | *.tgz 7 | -------------------------------------------------------------------------------- /transport-interop/Makefile: -------------------------------------------------------------------------------- 1 | GO_SUBDIRS := $(wildcard impl/go/*/.) 2 | JS_SUBDIRS := $(wildcard impl/js/*/.) 3 | RUST_SUBDIRS := $(wildcard impl/rust/*/.) 4 | NIM_SUBDIRS := $(wildcard impl/nim/*/.) 5 | ZIG_SUBDIRS := $(wildcard impl/zig/*/.) 6 | JAVA_SUBDIRS := $(wildcard impl/java/*/.) 7 | 8 | all: $(GO_SUBDIRS) $(JS_SUBDIRS) $(RUST_SUBDIRS) $(NIM_SUBDIRS) $(ZIG_SUBDIRS) $(JAVA_SUBDIRS) 9 | $(JS_SUBDIRS): 10 | $(MAKE) -C $@ 11 | $(GO_SUBDIRS): 12 | $(MAKE) -C $@ 13 | $(RUST_SUBDIRS): 14 | $(MAKE) -C $@ 15 | $(NIM_SUBDIRS): 16 | $(MAKE) -C $@ 17 | $(ZIG_SUBDIRS): 18 | $(MAKE) -C $@ 19 | $(JAVA_SUBDIRS): 20 | $(MAKE) -C $@ 21 | 22 | .PHONY: $(GO_SUBDIRS) $(JS_SUBDIRS) $(RUST_SUBDIRS) $(NIM_SUBDIRS) $(ZIG_SUBDIRS) $(JAVA_SUBDIRS) all 23 | -------------------------------------------------------------------------------- /transport-interop/README.md: -------------------------------------------------------------------------------- 1 | # Transport Interoperability tests 2 | 3 | This tests that different libp2p implementations can communicate with each other 4 | on each of their supported (transport) capabilities. 5 | 6 | Each version of libp2p is defined in `versions.ts`. There the version defines 7 | its capabilities along with the id of its container image. 8 | 9 | This repo and tests adhere to these constraints: 10 | 1. Be reproducible for a given commit. 11 | 2. Caching is an optimization. Things should be fine without it. 12 | 3. If we have a cache hit, be fast. 13 | 14 | # Test spec 15 | 16 | The implementation is run in a container and is passed parameters via 17 | environment variables. The current parameters are: 18 | 19 | | Name | Description | Is Optional | 20 | | -------------------- | ------------------------------------------------------------ | --------------------------------------------------------------- | 21 | | transport | The transport to use | no | 22 | | muxer | The muxer to use | no, except when transport is one of quic, quic-v1, webtransport | 23 | | security | The security channel to use | no, except when transport is one of quic, quic-v1, webtransport | 24 | | is_dialer | Should you dial or listen | no | 25 | | ip | IP address to bind the listener to | yes, default to "0.0.0.0" | 26 | | redis_addr | A different address to connect to redis (default redis:6379) | yes, default to the `redis` host on port 6379 | 27 | | test_timeout_seconds | Control the timeout of test. | yes, default to 180 seconds. | 28 | 29 | The test should do two different things depending on if it's the dialer or 30 | listener. 
31 | 32 | ## Running Locally 33 | 34 | In some cases you may want to run locally when debugging, such as when modifying internal dependencies. 35 | 36 | 1. To run the test locally, you'll need to have docker installed in order to run the redis instance. Once docker is running, you can run the following command to start the redis instance: 37 | 38 | ```bash 39 | docker run --rm -p 6379:6379 redis:7-alpine 40 | ``` 41 | 42 | This will start a redis instance on port 6379. 43 | 44 | 2. Next, you'll need to install the dependencies and build the implementation for the test. In this and the next step we are using a JS implementation as an example, so you would run the following command: 45 | 46 | ```bash 47 | cd impl/js/v0.xx.xx/ && make 48 | ``` 49 | 50 | 3. Then you can run a listener by running the following command; in this case we are running a rust listener: 51 | 52 | ```bash 53 | RUST_LOG=yamux=trace transport=tcp muxer=yamux security=noise is_dialer=false ip="0.0.0.0" redis_addr=localhost:6379 cargo run --package interop-tests 54 | ``` 55 | 56 | 4. Finally, you can run a dialer by running the following command; ensure that you pass the required environment variables, as well as any that may be of use for debugging: 57 | 58 | ```bash 59 | DEBUG=*:yamux:trace transport=tcp muxer=yamux security=noise is_dialer=true npm run test -- -t node 60 | ``` 61 | 62 | For more details on how to run a dialer vs a listener, see the sections below. 63 | 64 | ## Dialer 65 | 66 | The dialer should emit all diagnostic logs to `stderr`. Only the final JSON 67 | string result should be emitted to `stdout`. 68 | 69 | 1. Connect to the Redis instance. 70 | 2. Create a libp2p node as defined by the environment variables. 71 | 3. Get the listener's address via Redis' `BLPOP` using the `listenerAddr` key. 72 | 4. Record the current instant as `handshakeStartInstant`. 73 | 5. Connect to the listener. 74 | 6. Ping the listener, and record the round trip duration as `pingRTT`. 75 | 7. Record the duration since `handshakeStartInstant`. This is `handshakePlusOneRTT`. 76 | 8. Print to `stdout` the JSON formatted string: `{"handshakePlusOneRTTMillis": 77 | handshakePlusOneRTT, "pingRTTMilllis": pingRTT}`. Durations should be printed in 78 | milliseconds as a float. 79 | 9. Exit with exit code zero. 80 | 81 | On error, the dialer should return a non-zero exit code. 82 | 83 | ## Listener 84 | 85 | The listener should emit all diagnostic logs to `stderr`. 86 | 87 | 1. Connect to the Redis instance. 88 | 2. Create a libp2p node as defined by the environment variables. 89 | 3. Publish the listener's address via Redis' `RPUSH` using the `listenerAddr` 90 | key. 91 | 4. Sleep for the duration of `test_timeout_seconds`. The test runner will kill this 92 | process when the dialer finishes. 93 | 5. If the timeout is hit, exit with a non-zero error code. 94 | 95 | On error, the listener should return a non-zero exit code. 96 | 97 | # Caching 98 | 99 | The caching strategy is opinionated in an attempt to make things simpler and 100 | faster. Here's how it works: 101 | 102 | 1. We cache the result of image.json in each implementation folder. 103 | 2. The cache key is derived from the hashes of the files in the implementation folder. 104 | 3. When loading from cache, if we have a cache hit, we load the image into 105 | docker and create the image.json file. We then call `make -o image.json` to 106 | allow the implementation to build any extra things from cache (e.g. JS-libp2p 107 | builds browser images from the same base as node). 
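To make the cache key in step 2 concrete, here is a minimal sketch of one way such a key could be derived, assuming only Node's built-in `crypto` and `fs` modules; the repo's real logic lives in `helpers/cache.ts` and may differ in which files it includes:

```ts
import { createHash } from 'node:crypto'
import { readFileSync, readdirSync, statSync } from 'node:fs'
import { join } from 'node:path'

// Minimal sketch: derive a cache key from the hashes of all files in an
// implementation folder. Sorting the directory listing keeps the key stable
// across filesystems; file names are hashed too, so renames also invalidate.
function cacheKey (dir: string): string {
  const hash = createHash('sha256')
  for (const entry of readdirSync(dir).sort()) {
    const full = join(dir, entry)
    if (statSync(full).isDirectory()) {
      hash.update(cacheKey(full)) // recurse into subfolders
    } else {
      hash.update(entry)
      hash.update(readFileSync(full))
    }
  }
  return hash.digest('hex')
}

// e.g. cacheKey('impl/go/v0.40') -> "9f2c..." used as the cache lookup key
```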
If we have a cache miss, 108 | we simply call `make` and build from scratch. 109 | 4. When we push the cache, we use the cache key along with the docker platform 110 | (arm64 vs x86_64). 111 | -------------------------------------------------------------------------------- /transport-interop/dockerBuildWrapper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CACHING_OPTIONS="" 4 | # If in CI and we have a defined cache bucket, use caching 5 | if [[ -n "${CI}" ]] && [[ -n "${AWS_BUCKET}" ]]; then 6 | CACHING_OPTIONS="\ 7 | --cache-to type=s3,mode=max,bucket=$AWS_BUCKET,region=$AWS_REGION,prefix=buildCache,name=$IMAGE_NAME \ 8 | --cache-from type=s3,mode=max,bucket=$AWS_BUCKET,region=$AWS_REGION,prefix=buildCache,name=$IMAGE_NAME" 9 | fi 10 | 11 | docker buildx build \ 12 | --load \ 13 | -t $IMAGE_NAME $CACHING_OPTIONS "$@" 14 | -------------------------------------------------------------------------------- /transport-interop/impl/go/.gitignore: -------------------------------------------------------------------------------- 1 | go-libp2p-*.zip 2 | go-libp2p-* 3 | go-libp2p-*/* 4 | image.json 5 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.36/Makefile: -------------------------------------------------------------------------------- 1 | image_name := go-v0.36 2 | version := 0.36.5 3 | 4 | all: image.json 5 | 6 | image.json: verify-checksum go-libp2p-${version} 7 | cd go-libp2p-${version} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f test-plans/PingDockerfile . 8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | go-libp2p-${version}: go-libp2p-${version}.zip 12 | unzip -o go-libp2p-${version}.zip 13 | 14 | go-libp2p-${version}.zip: 15 | wget -O $@ "https://github.com/libp2p/go-libp2p/archive/v${version}.zip" 16 | 17 | # Run `make version.lock` to generate this lock file. This file should be committed. 18 | # This locks the exact contents of the specified version. This lets us use the 19 | # human readable name while still making sure the contents don't change. 20 | version.lock: go-libp2p-${version}.zip 21 | shasum -a 256 go-libp2p-${version}.zip > $@ 22 | 23 | verify-checksum: go-libp2p-${version}.zip 24 | shasum -a 256 -c version.lock 25 | 26 | .PHONY: clean all verify-checksum 27 | 28 | clean: 29 | rm image.json 30 | rm go-libp2p-*.zip 31 | rm -rf go-libp2p-* 32 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.36/version.lock: -------------------------------------------------------------------------------- 1 | da6028d5ba79a65c670ded3d4e0745712cab3e012f24ec38aa771bf58745471a go-libp2p-0.36.5.zip 2 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.37/Makefile: -------------------------------------------------------------------------------- 1 | image_name := go-v0.37 2 | version := 0.37.2 3 | 4 | all: image.json 5 | 6 | image.json: verify-checksum go-libp2p-${version} 7 | cd go-libp2p-${version} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f test-plans/PingDockerfile . 
8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | go-libp2p-${version}: go-libp2p-${version}.zip 12 | unzip -o go-libp2p-${version}.zip 13 | 14 | go-libp2p-${version}.zip: 15 | wget -O $@ "https://github.com/libp2p/go-libp2p/archive/v${version}.zip" 16 | 17 | # Run `make version.lock` to generate this lock file. This file should be committed. 18 | # This locks the exact contents of the specified version. This lets us use the 19 | # human readable name while still making sure the contents don't change. 20 | version.lock: go-libp2p-${version}.zip 21 | shasum -a 256 go-libp2p-${version}.zip > $@ 22 | 23 | verify-checksum: go-libp2p-${version}.zip 24 | shasum -a 256 -c version.lock 25 | 26 | .PHONY: clean all verify-checksum 27 | 28 | clean: 29 | rm image.json 30 | rm go-libp2p-*.zip 31 | rm -rf go-libp2p-* 32 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.37/version.lock: -------------------------------------------------------------------------------- 1 | 4c6e3548c4a75f6bceabe14bae455b78fd29ddfeaa05747cda913683ae274d40 go-libp2p-0.37.2.zip 2 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.39/Makefile: -------------------------------------------------------------------------------- 1 | image_name := go-v0.39 2 | version := 0.39.1 3 | 4 | all: image.json 5 | 6 | image.json: verify-checksum go-libp2p-${version} 7 | cd go-libp2p-${version} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f test-plans/PingDockerfile . 8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | go-libp2p-${version}: go-libp2p-${version}.zip 12 | unzip -o go-libp2p-${version}.zip 13 | 14 | go-libp2p-${version}.zip: 15 | wget -O $@ "https://github.com/libp2p/go-libp2p/archive/v${version}.zip" 16 | 17 | # Run `make version.lock` to generate this lock file. This file should be committed. 18 | # This locks the exact contents of the specified version. This lets us use the 19 | # human readable name while still making sure the contents don't change. 20 | version.lock: go-libp2p-${version}.zip 21 | shasum -a 256 go-libp2p-${version}.zip > $@ 22 | 23 | verify-checksum: go-libp2p-${version}.zip 24 | shasum -a 256 -c version.lock 25 | 26 | .PHONY: clean all verify-checksum 27 | 28 | clean: 29 | rm image.json 30 | rm go-libp2p-*.zip 31 | rm -rf go-libp2p-* 32 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.39/version.lock: -------------------------------------------------------------------------------- 1 | 154322797bc1fb6dd57d2a2a520f6aaaf8f0881dc66c79fa264f41e51130d468 go-libp2p-0.39.1.zip 2 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.40/Makefile: -------------------------------------------------------------------------------- 1 | image_name := go-v0.40 2 | version := 0.40.0 3 | 4 | all: image.json 5 | 6 | image.json: verify-checksum go-libp2p-${version} 7 | cd go-libp2p-${version} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f test-plans/PingDockerfile . 
8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | go-libp2p-${version}: go-libp2p-${version}.zip 12 | unzip -o go-libp2p-${version}.zip 13 | 14 | go-libp2p-${version}.zip: 15 | wget -O $@ "https://github.com/libp2p/go-libp2p/archive/v${version}.zip" 16 | 17 | # Run `make version.lock` to generate this lock file. This file should be committed. 18 | # This locks the exact contents of the specified version. This lets us use the 19 | # human readable name while still making sure the contents don't change. 20 | version.lock: go-libp2p-${version}.zip 21 | shasum -a 256 go-libp2p-${version}.zip > $@ 22 | 23 | verify-checksum: go-libp2p-${version}.zip 24 | shasum -a 256 -c version.lock 25 | 26 | .PHONY: clean all verify-checksum 27 | 28 | clean: 29 | rm image.json 30 | rm go-libp2p-*.zip 31 | rm -rf go-libp2p-* 32 | -------------------------------------------------------------------------------- /transport-interop/impl/go/v0.40/version.lock: -------------------------------------------------------------------------------- 1 | b0f2144d15fddf1ded6e56545dc3df24e0ab5e4c467f3bca1276453d6011da7a go-libp2p-0.40.0.zip 2 | -------------------------------------------------------------------------------- /transport-interop/impl/java/.gitignore: -------------------------------------------------------------------------------- 1 | java-libp2p-* 2 | nabu-* 3 | image.json -------------------------------------------------------------------------------- /transport-interop/impl/java/v0.0.1/Makefile: -------------------------------------------------------------------------------- 1 | image_name := java-v0.0.1 2 | commitSha := 6fad470c92a5c4ab368bc615e64dbf40b5ff9ccb 3 | 4 | all: image.json 5 | 6 | image.json: 7 | wget -O java-libp2p-${commitSha}.zip "https://github.com/Peergos/nabu/archive/${commitSha}.zip" 8 | unzip -o java-libp2p-${commitSha}.zip 9 | cd nabu-${commitSha} && docker build -t ${image_name} -f Dockerfile . 10 | docker image inspect ${image_name} -f "{{.Id}}" | \ 11 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 12 | 13 | clean: 14 | rm -rf image.json java-libp2p-*.zip nabu-* 15 | -------------------------------------------------------------------------------- /transport-interop/impl/java/v0.6/Makefile: -------------------------------------------------------------------------------- 1 | image_name := java-v0.6 2 | commitSha := f917cc40060fcffc0b7ee9c66a04b35fb1b0a9bd 3 | 4 | all: image.json 5 | 6 | image.json: 7 | wget -O java-libp2p-${commitSha}.zip "https://github.com/Peergos/nabu/archive/${commitSha}.zip" 8 | unzip -o java-libp2p-${commitSha}.zip 9 | cd nabu-${commitSha} && docker build -t ${image_name} -f Dockerfile . 10 | docker image inspect ${image_name} -f "{{.Id}}" | \ 11 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 12 | 13 | clean: 14 | rm -rf image.json java-libp2p-*.zip nabu-* 15 | -------------------------------------------------------------------------------- /transport-interop/impl/java/v0.9/Makefile: -------------------------------------------------------------------------------- 1 | image_name := java-v0.9 2 | commitSha := 2678425df28132e98307c825c90cc6efa58240a8 3 | 4 | all: image.json 5 | 6 | image.json: 7 | wget -O java-libp2p-${commitSha}.zip "https://github.com/Peergos/nabu/archive/${commitSha}.zip" 8 | unzip -o java-libp2p-${commitSha}.zip 9 | cd nabu-${commitSha} && docker build -t ${image_name} -f Dockerfile . 
10 | docker image inspect ${image_name} -f "{{.Id}}" | \ 11 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 12 | 13 | clean: 14 | rm -rf image.json java-libp2p-*.zip nabu-* 15 | -------------------------------------------------------------------------------- /transport-interop/impl/js/.gitignore: -------------------------------------------------------------------------------- 1 | *image.json 2 | js-libp2p-*.zip 3 | js-libp2p-* 4 | js-libp2p-*/* 5 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/.aegir.js: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | import http from 'http' 3 | import { pEvent } from 'p-event' 4 | import { createClient } from 'redis' 5 | 6 | const redisAddr = process.env.redis_addr || 'redis:6379' 7 | const transport = process.env.transport 8 | const isDialer = process.env.is_dialer === 'true' 9 | 10 | /** @type {import('aegir/types').PartialOptions} */ 11 | export default { 12 | test: { 13 | browser: { 14 | config: { 15 | // Ignore self signed certificates 16 | browserContextOptions: { ignoreHTTPSErrors: true } 17 | } 18 | }, 19 | async before () { 20 | // import after build is complete 21 | const { createRelay } = await import('./dist/test/fixtures/relay.js') 22 | 23 | let relayNode 24 | let relayAddr 25 | if (transport === 'webrtc' && !isDialer) { 26 | relayNode = await createRelay() 27 | 28 | const sortByNonLocalIp = (a, b) => { 29 | if (a.toString().includes('127.0.0.1')) { 30 | return 1 31 | } 32 | return -1 33 | } 34 | 35 | relayAddr = relayNode.getMultiaddrs().sort(sortByNonLocalIp)[0].toString() 36 | } 37 | 38 | const redisClient = createClient({ 39 | url: `redis://${redisAddr}` 40 | }) 41 | redisClient.on('error', (err) => { 42 | console.error('Redis client error:', err) 43 | }) 44 | await redisClient.connect() 45 | 46 | const requestListener = async function (req, res) { 47 | const requestJSON = await new Promise(resolve => { 48 | let body = '' 49 | req.on('data', function (data) { 50 | body += data 51 | }) 52 | 53 | req.on('end', function () { 54 | resolve(JSON.parse(body)) 55 | }) 56 | }) 57 | 58 | try { 59 | const redisRes = await redisClient.sendCommand(requestJSON) 60 | 61 | if (redisRes == null) { 62 | console.error('Redis failure - sent', requestJSON, 'received', redisRes) 63 | 64 | res.writeHead(500, { 65 | 'Access-Control-Allow-Origin': '*' 66 | }) 67 | res.end(JSON.stringify({ 68 | message: 'Redis sent back null' 69 | })) 70 | 71 | return 72 | } 73 | 74 | res.writeHead(200, { 75 | 'Access-Control-Allow-Origin': '*' 76 | }) 77 | res.end(JSON.stringify(redisRes)) 78 | } catch (err) { 79 | console.error('Error in redis command:', err) 80 | res.writeHead(500, { 81 | 'Access-Control-Allow-Origin': '*' 82 | }) 83 | res.end(err.toString()) 84 | } 85 | } 86 | 87 | const proxyServer = http.createServer(requestListener) 88 | proxyServer.listen(0) 89 | 90 | await pEvent(proxyServer, 'listening', { 91 | signal: AbortSignal.timeout(5000) 92 | }) 93 | 94 | return { 95 | redisClient, 96 | relayNode, 97 | proxyServer, 98 | env: { 99 | ...process.env, 100 | RELAY_ADDR: relayAddr, 101 | REDIS_PROXY_PORT: proxyServer.address().port 102 | } 103 | } 104 | }, 105 | async after (_, { proxyServer, redisClient, relayNode }) { 106 | await new Promise(resolve => { 107 | proxyServer?.close(() => resolve()) 108 | }) 109 | 110 | try { 111 | // We don't care if this fails 112 | await redisClient?.disconnect() 113 | await relayNode?.stop() 114 | 
} catch { } 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/BrowserDockerfile: -------------------------------------------------------------------------------- 1 | # Workaround: https://github.com/docker/cli/issues/996 2 | ARG BASE_IMAGE=node-js-libp2p-head 3 | FROM ${BASE_IMAGE} 4 | 5 | WORKDIR /app 6 | 7 | # Options: chromium, firefox, webkit 8 | ARG BROWSER=chromium 9 | ENV BROWSER=${BROWSER} 10 | 11 | ENTRYPOINT npm test -- -t browser -- --browser $BROWSER 12 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/Dockerfile: -------------------------------------------------------------------------------- 1 | # install node and browsers 2 | FROM mcr.microsoft.com/playwright:v1.50.1 3 | 4 | WORKDIR /app 5 | 6 | COPY package*.json .aegir.js tsconfig.json ./ 7 | COPY src ./src 8 | COPY test ./test 9 | 10 | # disable colored output and CLI animation from test runners 11 | ENV CI=true 12 | 13 | # install inside the container so any native deps will have the docker arch 14 | RUN npm ci 15 | RUN npm run build 16 | 17 | ENTRYPOINT npm test -- -t node -- --exit 18 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/Makefile: -------------------------------------------------------------------------------- 1 | image_name := js-v1.x 2 | 3 | # TODO Enable webkit once https://github.com/libp2p/js-libp2p/pull/1627 is in 4 | all: image.json chromium-image.json firefox-image.json update-lock-file 5 | 6 | # Necessary because multistage builds require a docker image name rather than a digest to be used 7 | load-image-json: image.json 8 | docker image tag $$(jq -r .imageID image.json) node-${image_name} 9 | 10 | image.json: 11 | docker builder prune -af 12 | docker build -t node-${image_name} -f ./Dockerfile . 13 | docker image inspect node-${image_name} -f "{{.Id}}" | \ 14 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 15 | 16 | chromium-image.json: load-image-json 17 | docker build -f BrowserDockerfile --build-arg=BASE_IMAGE=node-${image_name} --build-arg=BROWSER=chromium -t chromium-${image_name} . 18 | docker image inspect chromium-${image_name} -f "{{.Id}}" | \ 19 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 20 | 21 | firefox-image.json: load-image-json 22 | docker build -f BrowserDockerfile --build-arg=BASE_IMAGE=node-${image_name} --build-arg=BROWSER=firefox -t firefox-${image_name} . 23 | docker image inspect firefox-${image_name} -f "{{.Id}}" | \ 24 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 25 | 26 | # We update the lock file here so that we make sure we are always using the correct lock file. 27 | # If this changes, CI will fail since there are unstaged changes. 
28 | update-lock-file: image.json 29 | CONTAINER_ID=$$(docker create $$(jq -r .imageID image.json)); \ 30 | docker cp $$CONTAINER_ID:/app/package-lock.json ./package-lock.json; \ 31 | docker rm $$CONTAINER_ID 32 | 33 | clean: 34 | rm -rf *-image.json 35 | 36 | .PHONY: clean 37 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@libp2p/transport-interop-libp2p-1.x", 3 | "version": "1.0.0", 4 | "type": "module", 5 | "private": true, 6 | "scripts": { 7 | "clean": "aegir clean", 8 | "build": "aegir build --bundle false", 9 | "test": "aegir test", 10 | "lint": "aegir lint", 11 | "dep-check": "aegir dep-check" 12 | }, 13 | "devDependencies": { 14 | "@chainsafe/libp2p-noise": "^15.0.0", 15 | "@chainsafe/libp2p-yamux": "^6.0.2", 16 | "@libp2p/circuit-relay-v2": "^1.0.24", 17 | "@libp2p/identify": "^2.0.2", 18 | "@libp2p/interface": "^1.4.0", 19 | "@libp2p/mplex": "^10.0.24", 20 | "@libp2p/ping": "^1.0.19", 21 | "@libp2p/tcp": "^9.0.26", 22 | "@libp2p/webrtc": "^4.0.33", 23 | "@libp2p/websockets": "^8.0.24", 24 | "@libp2p/webtransport": "^4.0.32", 25 | "@multiformats/multiaddr": "^12.1.10", 26 | "aegir": "^42.2.11", 27 | "libp2p": "^1.6.0", 28 | "p-event": "^6.0.1", 29 | "redis": "^4.6.10" 30 | }, 31 | "browser": { 32 | "@libp2p/tcp": false 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/src/index.ts: -------------------------------------------------------------------------------- 1 | // Everything is defined in the test folder 2 | 3 | export { } 4 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/test/dialer.spec.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | /* eslint-env mocha */ 3 | 4 | import { multiaddr } from '@multiformats/multiaddr' 5 | import { getLibp2p } from './fixtures/get-libp2p.js' 6 | import { redisProxy } from './fixtures/redis-proxy.js' 7 | import type { Libp2p } from '@libp2p/interface' 8 | import type { PingService } from '@libp2p/ping' 9 | 10 | const isDialer: boolean = process.env.is_dialer === 'true' 11 | const timeoutMs: number = parseInt(process.env.test_timeout_secs ?? 
'180') * 1000 12 | 13 | describe('ping test (dialer)', function () { 14 | if (!isDialer) { 15 | return 16 | } 17 | 18 | // make the default timeout longer than the listener timeout 19 | this.timeout(timeoutMs + 30_000) 20 | let node: Libp2p<{ ping: PingService }> 21 | 22 | beforeEach(async () => { 23 | node = await getLibp2p() 24 | }) 25 | 26 | afterEach(async () => { 27 | // Shutdown libp2p node 28 | try { 29 | // We don't care if this fails 30 | await node.stop() 31 | } catch { } 32 | }) 33 | 34 | it('should dial and ping', async function () { 35 | this.timeout(timeoutMs + 30_000) 36 | 37 | let [, otherMaStr]: string[] = await redisProxy(['BLPOP', 'listenerAddr', `${timeoutMs / 1000}`]) 38 | 39 | // Hack until these are merged: 40 | // - https://github.com/multiformats/js-multiaddr-to-uri/pull/120 41 | otherMaStr = otherMaStr.replace('/tls/ws', '/wss') 42 | 43 | const otherMa = multiaddr(otherMaStr) 44 | const handshakeStartInstant = Date.now() 45 | 46 | console.error(`node ${node.peerId.toString()} dials: ${otherMa}`) 47 | await node.dial(otherMa, { 48 | signal: AbortSignal.timeout(timeoutMs) 49 | }) 50 | 51 | console.error(`node ${node.peerId.toString()} pings: ${otherMa}`) 52 | const pingRTT = await node.services.ping.ping(multiaddr(otherMa), { 53 | signal: AbortSignal.timeout(timeoutMs) 54 | }) 55 | const handshakePlusOneRTT = Date.now() - handshakeStartInstant 56 | console.log(JSON.stringify({ 57 | handshakePlusOneRTTMillis: handshakePlusOneRTT, 58 | pingRTTMilllis: pingRTT 59 | })) 60 | }) 61 | }) 62 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/test/fixtures/get-libp2p.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable complexity */ 2 | 3 | import { noise } from '@chainsafe/libp2p-noise' 4 | import { yamux } from '@chainsafe/libp2p-yamux' 5 | import { mplex } from '@libp2p/mplex' 6 | import { tcp } from '@libp2p/tcp' 7 | import { webRTC, webRTCDirect } from '@libp2p/webrtc' 8 | import { webSockets } from '@libp2p/websockets' 9 | import * as filters from '@libp2p/websockets/filters' 10 | import { webTransport } from '@libp2p/webtransport' 11 | import { type Libp2pOptions, createLibp2p } from 'libp2p' 12 | import { circuitRelayTransport } from '@libp2p/circuit-relay-v2' 13 | import { type Identify, identify } from '@libp2p/identify' 14 | import { type PingService, ping } from '@libp2p/ping' 15 | import type { Libp2p } from '@libp2p/interface' 16 | 17 | const isDialer: boolean = process.env.is_dialer === 'true' 18 | 19 | // Setup libp2p node 20 | const TRANSPORT = process.env.transport 21 | const SECURE_CHANNEL = process.env.security 22 | const MUXER = process.env.muxer 23 | const IP = process.env.ip ?? '0.0.0.0' 24 | 25 | export async function getLibp2p (): Promise<Libp2p<{ ping: PingService }>> { 26 | const options: Libp2pOptions<{ ping: PingService, identify: Identify }> = { 27 | start: true, 28 | connectionManager: { 29 | minConnections: 0 30 | }, 31 | connectionGater: { 32 | denyDialMultiaddr: async () => false 33 | }, 34 | services: { 35 | ping: ping(), 36 | identify: identify() 37 | } 38 | } 39 | 40 | switch (TRANSPORT) { 41 | case 'tcp': 42 | options.transports = [tcp()] 43 | options.addresses = { 44 | listen: isDialer ? 
[] : [`/ip4/${IP}/tcp/0`] 45 | } 46 | break 47 | case 'webtransport': 48 | options.transports = [webTransport()] 49 | if (!isDialer) { 50 | throw new Error('WebTransport is not supported as a listener') 51 | } 52 | break 53 | case 'webrtc-direct': 54 | options.transports = [webRTCDirect()] 55 | options.addresses = { 56 | listen: isDialer ? [] : [`/ip4/${IP}/udp/0/webrtc-direct`] 57 | } 58 | break 59 | case 'webrtc': 60 | options.transports = [webRTC(), 61 | webSockets({ filter: filters.all }), // ws needed to connect to relay 62 | circuitRelayTransport({ 63 | discoverRelays: 1 64 | }) // needed to use the relay 65 | ] 66 | options.addresses = { 67 | listen: isDialer ? [] : ['/webrtc'] 68 | } 69 | break 70 | case 'ws': 71 | options.transports = [webSockets()] 72 | options.addresses = { 73 | listen: isDialer ? [] : [`/ip4/${IP}/tcp/0/ws`] 74 | } 75 | break 76 | case 'wss': 77 | process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0' 78 | options.transports = [webSockets()] 79 | options.addresses = { 80 | listen: isDialer ? [] : [`/ip4/${IP}/tcp/0/wss`] 81 | } 82 | break 83 | default: 84 | throw new Error(`Unknown transport: ${TRANSPORT ?? '???'}`) 85 | } 86 | 87 | let skipSecureChannel = false 88 | let skipMuxer = false 89 | switch (TRANSPORT) { 90 | case 'webtransport': 91 | case 'webrtc-direct': 92 | skipSecureChannel = true 93 | skipMuxer = true 94 | break 95 | case 'webrtc': 96 | skipSecureChannel = true 97 | skipMuxer = true 98 | // Setup yamux and noise to connect to the relay node 99 | options.streamMuxers = [yamux()] 100 | options.connectionEncryption = [noise()] 101 | break 102 | default: 103 | // Do nothing 104 | } 105 | 106 | if (!skipSecureChannel) { 107 | switch (SECURE_CHANNEL) { 108 | case 'noise': 109 | options.connectionEncryption = [noise()] 110 | break 111 | default: 112 | throw new Error(`Unknown secure channel: ${SECURE_CHANNEL ?? ''}`) 113 | } 114 | } 115 | 116 | if (!skipMuxer) { 117 | switch (MUXER) { 118 | case 'mplex': 119 | options.streamMuxers = [mplex()] 120 | break 121 | case 'yamux': 122 | options.streamMuxers = [yamux()] 123 | break 124 | default: 125 | throw new Error(`Unknown muxer: ${MUXER ?? 
'???'}`) 126 | } 127 | } 128 | 129 | return createLibp2p(options) 130 | } 131 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/test/fixtures/redis-proxy.ts: -------------------------------------------------------------------------------- 1 | export async function redisProxy (commands: any[]): Promise<any> { 2 | const res = await fetch(`http://localhost:${process.env.REDIS_PROXY_PORT}`, { 3 | method: 'POST', 4 | body: JSON.stringify(commands) 5 | }) 6 | 7 | if (!res.ok) { 8 | throw new Error('Redis command failed') 9 | } 10 | 11 | return res.json() 12 | } 13 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/test/fixtures/relay.ts: -------------------------------------------------------------------------------- 1 | import { noise } from '@chainsafe/libp2p-noise' 2 | import { yamux } from '@chainsafe/libp2p-yamux' 3 | import { webSockets } from '@libp2p/websockets' 4 | import * as filters from '@libp2p/websockets/filters' 5 | import { createLibp2p } from 'libp2p' 6 | import { circuitRelayServer } from '@libp2p/circuit-relay-v2' 7 | import { identify } from '@libp2p/identify' 8 | import type { Libp2p } from '@libp2p/interface' 9 | 10 | export async function createRelay (): Promise<Libp2p> { 11 | const server = await createLibp2p({ 12 | addresses: { 13 | listen: ['/ip4/0.0.0.0/tcp/0/ws'] 14 | }, 15 | transports: [ 16 | webSockets({ 17 | filter: filters.all 18 | }) 19 | ], 20 | connectionEncryption: [noise()], 21 | streamMuxers: [yamux()], 22 | services: { 23 | identify: identify(), 24 | relay: circuitRelayServer({ 25 | reservations: { 26 | maxReservations: Infinity, 27 | applyDefaultLimit: false 28 | } 29 | }) 30 | } 31 | }) 32 | 33 | return server 34 | } 35 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/test/listener.spec.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | /* eslint-env mocha */ 3 | 4 | import { multiaddr, type Multiaddr } from '@multiformats/multiaddr' 5 | import { getLibp2p } from './fixtures/get-libp2p.js' 6 | import { redisProxy } from './fixtures/redis-proxy.js' 7 | import type { Libp2p } from '@libp2p/interface' 8 | import type { PingService } from '@libp2p/ping' 9 | 10 | const isDialer: boolean = process.env.is_dialer === 'true' 11 | const timeoutMs: number = parseInt(process.env.test_timeout_secs ?? 
'180') * 1000 12 | 13 | describe('ping test (listener)', function () { 14 | if (isDialer) { 15 | return 16 | } 17 | 18 | // make the default timeout longer than the listener timeout 19 | this.timeout(timeoutMs + 30_000) 20 | let node: Libp2p<{ ping: PingService }> 21 | 22 | beforeEach(async () => { 23 | node = await getLibp2p() 24 | }) 25 | 26 | afterEach(async () => { 27 | // Shutdown libp2p node 28 | try { 29 | // We don't care if this fails 30 | await node.stop() 31 | } catch { } 32 | }) 33 | 34 | it('should listen for ping', async function () { 35 | this.timeout(timeoutMs + 30_000) 36 | 37 | const sortByNonLocalIp = (a: Multiaddr, b: Multiaddr): -1 | 0 | 1 => { 38 | if (a.toString().includes('127.0.0.1')) { 39 | return 1 40 | } 41 | 42 | return -1 43 | } 44 | 45 | let multiaddrs = node.getMultiaddrs().sort(sortByNonLocalIp).map(ma => ma.toString()) 46 | 47 | const transport = process.env.transport 48 | if (transport === 'webrtc') { 49 | const relayAddr = process.env.RELAY_ADDR 50 | const hasWebrtcMultiaddr = new Promise<string[]>((resolve) => { 51 | const abortController = new AbortController() 52 | node.addEventListener('self:peer:update', (event) => { 53 | const webrtcMas = node.getMultiaddrs().filter(ma => ma.toString().includes('/webrtc')) 54 | if (webrtcMas.length > 0) { 55 | resolve(webrtcMas.sort(sortByNonLocalIp).map(ma => ma.toString())) 56 | abortController.abort() // only stop listening once a webrtc address has been observed 57 | } 58 | }, { signal: abortController.signal }) 59 | }) 60 | 61 | if (relayAddr == null || relayAddr === '') { 62 | throw new Error('No relayAddr') 63 | } 64 | 65 | console.error('dial relay') 66 | await node.dial(multiaddr(relayAddr), { 67 | signal: AbortSignal.timeout(timeoutMs) 68 | }) 69 | console.error('wait for relay reservation') 70 | multiaddrs = await hasWebrtcMultiaddr 71 | } 72 | 73 | console.error('inform redis of dial address') 74 | console.error(multiaddrs) 75 | // Send the listener addr over the proxy server so this works on both the Browser and Node 76 | await redisProxy(['RPUSH', 'listenerAddr', multiaddrs[0]]) 77 | // Stay alive long enough for the dialer to complete its ping 78 | console.error('wait for incoming ping') 79 | await new Promise(resolve => setTimeout(resolve, timeoutMs)) 80 | }) 81 | }) 82 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v1.x/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "aegir/src/config/tsconfig.aegir.json", 3 | "compilerOptions": { 4 | "outDir": "dist" 5 | }, 6 | "include": [ 7 | "src", 8 | "test" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/.aegir.js: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | import http from 'http' 3 | import { pEvent } from 'p-event' 4 | import { createClient } from 'redis' 5 | 6 | const redisAddr = process.env.redis_addr || 'redis:6379' 7 | const transport = process.env.transport 8 | const isDialer = process.env.is_dialer === 'true' 9 | 10 | /** @type {import('aegir/types').PartialOptions} */ 11 | export default { 12 | test: { 13 | browser: { 14 | config: { 15 | // Ignore self signed certificates 16 | browserContextOptions: { ignoreHTTPSErrors: true } 17 | } 18 | }, 19 | async before () { 20 | // import after build is complete 21 | const { createRelay } = await import('./dist/test/fixtures/relay.js') 22 | 23 | let relayNode 24 | let relayAddr 25 | if 
(transport === 'webrtc' && !isDialer) { 26 | relayNode = await createRelay() 27 | 28 | const sortByNonLocalIp = (a, b) => { 29 | if (a.toString().includes('127.0.0.1')) { 30 | return 1 31 | } 32 | return -1 33 | } 34 | 35 | relayAddr = relayNode.getMultiaddrs().sort(sortByNonLocalIp)[0].toString() 36 | } 37 | 38 | const redisClient = createClient({ 39 | url: `redis://${redisAddr}` 40 | }) 41 | redisClient.on('error', (err) => { 42 | console.error('Redis client error:', err) 43 | }) 44 | await redisClient.connect() 45 | 46 | const requestListener = async function (req, res) { 47 | const requestJSON = await new Promise(resolve => { 48 | let body = '' 49 | req.on('data', function (data) { 50 | body += data 51 | }) 52 | 53 | req.on('end', function () { 54 | resolve(JSON.parse(body)) 55 | }) 56 | }) 57 | 58 | try { 59 | const redisRes = await redisClient.sendCommand(requestJSON) 60 | 61 | if (redisRes == null) { 62 | console.error('Redis failure - sent', requestJSON, 'received', redisRes) 63 | 64 | res.writeHead(500, { 65 | 'Access-Control-Allow-Origin': '*' 66 | }) 67 | res.end(JSON.stringify({ 68 | message: 'Redis sent back null' 69 | })) 70 | 71 | return 72 | } 73 | 74 | res.writeHead(200, { 75 | 'Access-Control-Allow-Origin': '*' 76 | }) 77 | res.end(JSON.stringify(redisRes)) 78 | } catch (err) { 79 | console.error('Error in redis command:', err) 80 | res.writeHead(500, { 81 | 'Access-Control-Allow-Origin': '*' 82 | }) 83 | res.end(err.toString()) 84 | } 85 | } 86 | 87 | const proxyServer = http.createServer(requestListener) 88 | proxyServer.listen(0) 89 | 90 | await pEvent(proxyServer, 'listening', { 91 | signal: AbortSignal.timeout(5000) 92 | }) 93 | 94 | return { 95 | redisClient, 96 | relayNode, 97 | proxyServer, 98 | env: { 99 | ...process.env, 100 | RELAY_ADDR: relayAddr, 101 | REDIS_PROXY_PORT: proxyServer.address().port 102 | } 103 | } 104 | }, 105 | async after (_, { proxyServer, redisClient, relayNode }) { 106 | await new Promise(resolve => { 107 | proxyServer?.close(() => resolve()) 108 | }) 109 | 110 | try { 111 | // We don't care if this fails 112 | await redisClient?.disconnect() 113 | await relayNode?.stop() 114 | } catch { } 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/BrowserDockerfile: -------------------------------------------------------------------------------- 1 | # Workaround: https://github.com/docker/cli/issues/996 2 | ARG BASE_IMAGE=node-js-libp2p-head 3 | FROM ${BASE_IMAGE} 4 | 5 | WORKDIR /app 6 | 7 | # Options: chromium, firefox, webkit 8 | ARG BROWSER=chromium 9 | ENV BROWSER=${BROWSER} 10 | 11 | ENTRYPOINT npm test -- -t browser -- --browser $BROWSER 12 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/Dockerfile: -------------------------------------------------------------------------------- 1 | # install node and browsers 2 | FROM mcr.microsoft.com/playwright:v1.50.1 3 | 4 | WORKDIR /app 5 | 6 | COPY package*.json .aegir.js tsconfig.json ./ 7 | COPY src ./src 8 | COPY test ./test 9 | 10 | # disable colored output and CLI animation from test runners 11 | ENV CI=true 12 | 13 | # install inside the container so any native deps will have the docker arch 14 | RUN npm ci 15 | RUN npm run build 16 | 17 | ENTRYPOINT npm test -- -t node -- --exit 18 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/Makefile: 
-------------------------------------------------------------------------------- 1 | image_name := js-v2.x 2 | 3 | # TODO Enable webkit once https://github.com/libp2p/js-libp2p/pull/1627 is in 4 | all: image.json chromium-image.json firefox-image.json update-lock-file 5 | 6 | # Necessary because multistage builds require a docker image name rather than a digest to be used 7 | load-image-json: image.json 8 | docker image tag $$(jq -r .imageID image.json) node-${image_name} 9 | 10 | image.json: 11 | docker builder prune -af 12 | docker build -t node-${image_name} -f ./Dockerfile . 13 | docker image inspect node-${image_name} -f "{{.Id}}" | \ 14 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 15 | 16 | chromium-image.json: load-image-json 17 | docker build -f BrowserDockerfile --build-arg=BASE_IMAGE=node-${image_name} --build-arg=BROWSER=chromium -t chromium-${image_name} . 18 | docker image inspect chromium-${image_name} -f "{{.Id}}" | \ 19 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 20 | 21 | firefox-image.json: load-image-json 22 | docker build -f BrowserDockerfile --build-arg=BASE_IMAGE=node-${image_name} --build-arg=BROWSER=firefox -t firefox-${image_name} . 23 | docker image inspect firefox-${image_name} -f "{{.Id}}" | \ 24 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 25 | 26 | # We update the lock file here so that we make sure we are always using the correct lock file. 27 | # If this changes, CI will fail since there are unstaged changes. 28 | update-lock-file: image.json 29 | CONTAINER_ID=$$(docker create $$(jq -r .imageID image.json)); \ 30 | docker cp $$CONTAINER_ID:/app/package-lock.json ./package-lock.json; \ 31 | docker rm $$CONTAINER_ID 32 | 33 | clean: 34 | rm -rf *-image.json 35 | 36 | .PHONY: clean 37 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@libp2p/transport-interop-libp2p-2.x", 3 | "version": "1.0.0", 4 | "type": "module", 5 | "private": true, 6 | "scripts": { 7 | "clean": "aegir clean", 8 | "build": "aegir build --bundle false", 9 | "test": "aegir test", 10 | "lint": "aegir lint", 11 | "dep-check": "aegir dep-check" 12 | }, 13 | "devDependencies": { 14 | "@chainsafe/libp2p-noise": "^16.0.0", 15 | "@chainsafe/libp2p-yamux": "^7.0.1", 16 | "@libp2p/circuit-relay-v2": "^2.1.1", 17 | "@libp2p/identify": "^3.0.6", 18 | "@libp2p/interface": "^2.1.2", 19 | "@libp2p/mplex": "^11.0.6", 20 | "@libp2p/ping": "^2.0.6", 21 | "@libp2p/tcp": "^10.0.6", 22 | "@libp2p/webrtc": "^5.0.8", 23 | "@libp2p/websockets": "^9.0.6", 24 | "@libp2p/webtransport": "^5.0.8", 25 | "@multiformats/multiaddr": "^12.3.1", 26 | "aegir": "^44.1.1", 27 | "libp2p": "^2.1.4", 28 | "p-event": "^6.0.1", 29 | "redis": "^4.6.10" 30 | }, 31 | "browser": { 32 | "@libp2p/tcp": false 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/src/index.ts: -------------------------------------------------------------------------------- 1 | // Everything is defined in the test folder 2 | 3 | export { } 4 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/test/dialer.spec.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | /* eslint-env mocha */ 3 | 4 | import { multiaddr } from '@multiformats/multiaddr' 5 | import { 
getLibp2p } from './fixtures/get-libp2p.js' 6 | import { redisProxy } from './fixtures/redis-proxy.js' 7 | import type { Libp2p } from '@libp2p/interface' 8 | import type { PingService } from '@libp2p/ping' 9 | 10 | const isDialer: boolean = process.env.is_dialer === 'true' 11 | const timeoutMs: number = parseInt(process.env.test_timeout_secs ?? '180') * 1000 12 | 13 | describe('ping test (dialer)', function () { 14 | if (!isDialer) { 15 | return 16 | } 17 | 18 | // make the default timeout longer than the listener timeout 19 | this.timeout(timeoutMs + 30_000) 20 | let node: Libp2p<{ ping: PingService }> 21 | 22 | beforeEach(async () => { 23 | node = await getLibp2p() 24 | }) 25 | 26 | afterEach(async () => { 27 | // Shutdown libp2p node 28 | try { 29 | // We don't care if this fails 30 | await node.stop() 31 | } catch { } 32 | }) 33 | 34 | it('should dial and ping', async function () { 35 | this.timeout(timeoutMs + 30_000) 36 | 37 | let [, otherMaStr]: string[] = await redisProxy(['BLPOP', 'listenerAddr', `${timeoutMs / 1000}`]) 38 | 39 | // Hack until these are merged: 40 | // - https://github.com/multiformats/js-multiaddr-to-uri/pull/120 41 | otherMaStr = otherMaStr.replace('/tls/ws', '/wss') 42 | 43 | const otherMa = multiaddr(otherMaStr) 44 | const handshakeStartInstant = Date.now() 45 | 46 | console.error(`node ${node.peerId.toString()} dials: ${otherMa}`) 47 | await node.dial(otherMa, { 48 | signal: AbortSignal.timeout(timeoutMs) 49 | }) 50 | 51 | console.error(`node ${node.peerId.toString()} pings: ${otherMa}`) 52 | const pingRTT = await node.services.ping.ping(multiaddr(otherMa), { 53 | signal: AbortSignal.timeout(timeoutMs) 54 | }) 55 | const handshakePlusOneRTT = Date.now() - handshakeStartInstant 56 | console.log(JSON.stringify({ 57 | handshakePlusOneRTTMillis: handshakePlusOneRTT, 58 | pingRTTMilllis: pingRTT 59 | })) 60 | }) 61 | }) 62 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/test/fixtures/get-libp2p.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable complexity */ 2 | 3 | import { noise } from '@chainsafe/libp2p-noise' 4 | import { yamux } from '@chainsafe/libp2p-yamux' 5 | import { mplex } from '@libp2p/mplex' 6 | import { tcp } from '@libp2p/tcp' 7 | import { webRTC, webRTCDirect } from '@libp2p/webrtc' 8 | import { webSockets } from '@libp2p/websockets' 9 | import * as filters from '@libp2p/websockets/filters' 10 | import { webTransport } from '@libp2p/webtransport' 11 | import { type Libp2pOptions, createLibp2p } from 'libp2p' 12 | import { circuitRelayTransport } from '@libp2p/circuit-relay-v2' 13 | import { type Identify, identify } from '@libp2p/identify' 14 | import { type PingService, ping } from '@libp2p/ping' 15 | import type { Libp2p } from '@libp2p/interface' 16 | 17 | const isDialer: boolean = process.env.is_dialer === 'true' 18 | 19 | // Setup libp2p node 20 | const TRANSPORT = process.env.transport 21 | const SECURE_CHANNEL = process.env.security 22 | const MUXER = process.env.muxer 23 | const IP = process.env.ip ?? 
'0.0.0.0' 24 | 25 | export async function getLibp2p (): Promise<Libp2p<{ ping: PingService, identify: Identify }>> { 26 | const options: Libp2pOptions<{ ping: PingService, identify: Identify }> = { 27 | start: true, 28 | connectionGater: { 29 | denyDialMultiaddr: async () => false 30 | }, 31 | connectionMonitor: { 32 | enabled: false 33 | }, 34 | services: { 35 | ping: ping(), 36 | identify: identify() 37 | } 38 | } 39 | 40 | switch (TRANSPORT) { 41 | case 'tcp': 42 | options.transports = [tcp()] 43 | options.addresses = { 44 | listen: isDialer ? [] : [`/ip4/${IP}/tcp/0`] 45 | } 46 | break 47 | case 'webtransport': 48 | options.transports = [webTransport()] 49 | if (!isDialer) { 50 | throw new Error('WebTransport is not supported as a listener') 51 | } 52 | break 53 | case 'webrtc-direct': 54 | options.transports = [webRTCDirect()] 55 | options.addresses = { 56 | listen: isDialer ? [] : [`/ip4/${IP}/udp/0/webrtc-direct`] 57 | } 58 | break 59 | case 'webrtc': 60 | options.transports = [webRTC(), 61 | webSockets({ filter: filters.all }), // ws needed to connect to relay 62 | circuitRelayTransport({ 63 | discoverRelays: 1 64 | }) // needed to use the relay 65 | ] 66 | options.addresses = { 67 | listen: isDialer ? [] : ['/webrtc'] 68 | } 69 | break 70 | case 'ws': 71 | options.transports = [webSockets()] 72 | options.addresses = { 73 | listen: isDialer ? [] : [`/ip4/${IP}/tcp/0/ws`] 74 | } 75 | break 76 | case 'wss': 77 | process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0' 78 | options.transports = [webSockets()] 79 | options.addresses = { 80 | listen: isDialer ? [] : [`/ip4/${IP}/tcp/0/wss`] 81 | } 82 | break 83 | default: 84 | throw new Error(`Unknown transport: ${TRANSPORT ?? '???'}`) 85 | } 86 | 87 | let skipSecureChannel = false 88 | let skipMuxer = false 89 | switch (TRANSPORT) { 90 | case 'webtransport': 91 | case 'webrtc-direct': 92 | skipSecureChannel = true 93 | skipMuxer = true 94 | break 95 | case 'webrtc': 96 | skipSecureChannel = true 97 | skipMuxer = true 98 | // Setup yamux and noise to connect to the relay node 99 | options.streamMuxers = [yamux()] 100 | options.connectionEncrypters = [noise()] 101 | break 102 | default: 103 | // Do nothing 104 | } 105 | 106 | if (!skipSecureChannel) { 107 | switch (SECURE_CHANNEL) { 108 | case 'noise': 109 | options.connectionEncrypters = [noise()] 110 | break 111 | default: 112 | throw new Error(`Unknown secure channel: ${SECURE_CHANNEL ?? ''}`) 113 | } 114 | } 115 | 116 | if (!skipMuxer) { 117 | switch (MUXER) { 118 | case 'mplex': 119 | options.streamMuxers = [mplex()] 120 | break 121 | case 'yamux': 122 | options.streamMuxers = [yamux()] 123 | break 124 | default: 125 | throw new Error(`Unknown muxer: ${MUXER ?? 
'???'}`) 126 | } 127 | } 128 | 129 | return createLibp2p(options) 130 | } 131 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/test/fixtures/redis-proxy.ts: -------------------------------------------------------------------------------- 1 | export async function redisProxy (commands: any[]): Promise<any> { 2 | const res = await fetch(`http://localhost:${process.env.REDIS_PROXY_PORT}`, { 3 | method: 'POST', 4 | body: JSON.stringify(commands) 5 | }) 6 | 7 | if (!res.ok) { 8 | throw new Error('Redis command failed') 9 | } 10 | 11 | return res.json() 12 | } 13 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/test/fixtures/relay.ts: -------------------------------------------------------------------------------- 1 | import { noise } from '@chainsafe/libp2p-noise' 2 | import { yamux } from '@chainsafe/libp2p-yamux' 3 | import { webSockets } from '@libp2p/websockets' 4 | import * as filters from '@libp2p/websockets/filters' 5 | import { createLibp2p } from 'libp2p' 6 | import { circuitRelayServer } from '@libp2p/circuit-relay-v2' 7 | import { identify } from '@libp2p/identify' 8 | import type { Libp2p } from '@libp2p/interface' 9 | 10 | export async function createRelay (): Promise<Libp2p> { 11 | const server = await createLibp2p({ 12 | addresses: { 13 | listen: ['/ip4/0.0.0.0/tcp/0/ws'] 14 | }, 15 | transports: [ 16 | webSockets({ 17 | filter: filters.all 18 | }) 19 | ], 20 | connectionEncrypters: [noise()], 21 | streamMuxers: [yamux()], 22 | services: { 23 | identify: identify(), 24 | relay: circuitRelayServer({ 25 | reservations: { 26 | maxReservations: Infinity, 27 | applyDefaultLimit: false 28 | } 29 | }) 30 | } 31 | }) 32 | 33 | return server 34 | } 35 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/test/listener.spec.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | /* eslint-env mocha */ 3 | 4 | import { multiaddr, type Multiaddr } from '@multiformats/multiaddr' 5 | import { getLibp2p } from './fixtures/get-libp2p.js' 6 | import { redisProxy } from './fixtures/redis-proxy.js' 7 | import type { Libp2p } from '@libp2p/interface' 8 | import type { PingService } from '@libp2p/ping' 9 | 10 | const isDialer: boolean = process.env.is_dialer === 'true' 11 | const timeoutMs: number = parseInt(process.env.test_timeout_secs ?? 
'180') * 1000 12 | 13 | describe('ping test (listener)', function () { 14 | if (isDialer) { 15 | return 16 | } 17 | 18 | // make the default timeout longer than the listener timeout 19 | this.timeout(timeoutMs + 30_000) 20 | let node: Libp2p<{ ping: PingService }> 21 | 22 | beforeEach(async () => { 23 | node = await getLibp2p() 24 | }) 25 | 26 | afterEach(async () => { 27 | // Shutdown libp2p node 28 | try { 29 | // We don't care if this fails 30 | await node.stop() 31 | } catch { } 32 | }) 33 | 34 | it('should listen for ping', async function () { 35 | this.timeout(timeoutMs + 30_000) 36 | 37 | const sortByNonLocalIp = (a: Multiaddr, b: Multiaddr): -1 | 0 | 1 => { 38 | if (a.toString().includes('127.0.0.1')) { 39 | return 1 40 | } 41 | 42 | return -1 43 | } 44 | 45 | let multiaddrs = node.getMultiaddrs().sort(sortByNonLocalIp).map(ma => ma.toString()) 46 | 47 | const transport = process.env.transport 48 | if (transport === 'webrtc') { 49 | const relayAddr = process.env.RELAY_ADDR 50 | const hasWebrtcMultiaddr = new Promise<string[]>((resolve) => { 51 | const abortController = new AbortController() 52 | node.addEventListener('self:peer:update', (event) => { 53 | const webrtcMas = node.getMultiaddrs().filter(ma => ma.toString().includes('/webrtc')) 54 | if (webrtcMas.length > 0) { 55 | resolve(webrtcMas.sort(sortByNonLocalIp).map(ma => ma.toString())) 56 | abortController.abort() // only stop listening once a webrtc address has been observed 57 | } 58 | }, { signal: abortController.signal }) 59 | }) 60 | 61 | if (relayAddr == null || relayAddr === '') { 62 | throw new Error('No relayAddr') 63 | } 64 | 65 | console.error('dial relay') 66 | await node.dial(multiaddr(relayAddr), { 67 | signal: AbortSignal.timeout(timeoutMs) 68 | }) 69 | console.error('wait for relay reservation') 70 | multiaddrs = await hasWebrtcMultiaddr 71 | } 72 | 73 | console.error('inform redis of dial address') 74 | console.error(multiaddrs) 75 | // Send the listener addr over the proxy server so this works on both the Browser and Node 76 | await redisProxy(['RPUSH', 'listenerAddr', multiaddrs[0]]) 77 | // Stay alive long enough for the dialer to complete its ping 78 | console.error('wait for incoming ping') 79 | await new Promise(resolve => setTimeout(resolve, timeoutMs)) 80 | }) 81 | }) 82 | -------------------------------------------------------------------------------- /transport-interop/impl/js/v2.x/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "aegir/src/config/tsconfig.aegir.json", 3 | "compilerOptions": { 4 | "outDir": "dist" 5 | }, 6 | "include": [ 7 | "src", 8 | "test" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /transport-interop/impl/nim/mainv1.nim: -------------------------------------------------------------------------------- 1 | import 2 | std/[os, strutils, sequtils], 3 | chronos, redis, serialization, json_serialization, 4 | libp2p, libp2p/protocols/ping, libp2p/transports/wstransport 5 | 6 | type 7 | ResultJson = object 8 | handshakePlusOneRTTMillis: float 9 | pingRTTMilllis: float 10 | 11 | let 12 | testTimeout = 13 | try: seconds(parseInt(getEnv("test_timeout_seconds"))) 14 | except CatchableError: 3.minutes 15 | 16 | proc main {.async.} = 17 | 18 | let 19 | transport = getEnv("transport") 20 | muxer = getEnv("muxer") 21 | secureChannel = getEnv("security") 22 | isDialer = getEnv("is_dialer") == "true" 23 | envIp = getEnv("ip", "0.0.0.0") 24 | ip = 25 | # nim-libp2p doesn't do snazzy ip expansion 26 | if envIp == "0.0.0.0": 27 | block: 28 | let 
addresses = getInterfaces().filterIt(it.name == "eth0").mapIt(it.addresses) 29 | if addresses.len < 1 or addresses[0].len < 1: 30 | quit "Can't find local ip!" 31 | ($addresses[0][0].host).split(":")[0] 32 | else: 33 | envIp 34 | redisAddr = getEnv("redis_addr", "redis:6379").split(":") 35 | 36 | # using synchronous redis because async redis is based on 37 | # asyncdispatch instead of chronos 38 | redisClient = open(redisAddr[0], Port(parseInt(redisAddr[1]))) 39 | 40 | switchBuilder = SwitchBuilder.new() 41 | 42 | case transport: 43 | of "tcp": 44 | discard switchBuilder.withTcpTransport().withAddress( 45 | MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet() 46 | ) 47 | of "ws": 48 | discard switchBuilder.withTransport(proc (upgr: Upgrade): Transport = WsTransport.new(upgr)).withAddress( 49 | MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet() 50 | ) 51 | else: doAssert false 52 | 53 | case secureChannel: 54 | of "noise": discard switchBuilder.withNoise() 55 | else: doAssert false 56 | 57 | case muxer: 58 | of "yamux": discard switchBuilder.withYamux() 59 | of "mplex": discard switchBuilder.withMplex() 60 | else: doAssert false 61 | 62 | let 63 | rng = libp2p.newRng() 64 | switch = switchBuilder.withRng(rng).build() 65 | pingProtocol = Ping.new(rng = rng) 66 | switch.mount(pingProtocol) 67 | await switch.start() 68 | defer: await switch.stop() 69 | 70 | if not isDialer: 71 | discard redisClient.rPush("listenerAddr", $switch.peerInfo.fullAddrs.tryGet()[0]) 72 | await sleepAsync(100.hours) # will get cancelled 73 | else: 74 | let 75 | remoteAddr = MultiAddress.init(redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]).tryGet() 76 | dialingStart = Moment.now() 77 | remotePeerId = await switch.connect(remoteAddr) 78 | stream = await switch.dial(remotePeerId, PingCodec) 79 | pingDelay = await pingProtocol.ping(stream) 80 | totalDelay = Moment.now() - dialingStart 81 | await stream.close() 82 | 83 | echo Json.encode( 84 | ResultJson( 85 | handshakePlusOneRTTMillis: float(totalDelay.milliseconds), 86 | pingRTTMilllis: float(pingDelay.milliseconds) 87 | ) 88 | ) 89 | quit(0) 90 | 91 | discard waitFor(main().withTimeout(testTimeout)) 92 | quit(1) 93 | -------------------------------------------------------------------------------- /transport-interop/impl/nim/v1.0/.gitignore: -------------------------------------------------------------------------------- 1 | main.nim 2 | nim-libp2p 3 | nim-libp2p-*.zip 4 | nim-libp2p-*/ 5 | -------------------------------------------------------------------------------- /transport-interop/impl/nim/v1.0/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG NimVersion="1.6.16" 2 | ARG ImageHash="sha256:b4bb14fb74465a91a4e042194e1e9308965e7f2f824a06a7092ed938dc04015f" 3 | FROM nimlang/nim:${NimVersion}-alpine@${ImageHash} AS builder 4 | 5 | WORKDIR /app 6 | 7 | COPY nim-libp2p nim-libp2p 8 | 9 | RUN \ 10 | cd nim-libp2p && \ 11 | nimble install_pinned 12 | 13 | RUN \ 14 | cd nim-libp2p && \ 15 | nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" 16 | 17 | COPY main.nim main.nim 18 | RUN \ 19 | nim c --NimblePath:nim-libp2p/nimbledeps/pkgs -p:nim-libp2p -d:chronicles_log_level=WARN --threads:off main.nim 20 | 21 | ENTRYPOINT ["/app/main"] 22 | -------------------------------------------------------------------------------- /transport-interop/impl/nim/v1.0/Makefile: -------------------------------------------------------------------------------- 1 | image_name := nim-v1.0 2 | commitSha := 
408dcf12bdf44dcd6f9021a6c795c472679d6d02 3 | 4 | all: image.json 5 | 6 | image.json: main.nim nim-libp2p Dockerfile 7 | IMAGE_NAME=${image_name} ../../../dockerBuildWrapper.sh . 8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | main.nim: ../mainv1.nim 12 | cp ../mainv1.nim main.nim 13 | 14 | nim-libp2p: nim-libp2p-${commitSha} 15 | rm -rf nim-libp2p 16 | ln -s nim-libp2p-${commitSha} nim-libp2p 17 | 18 | nim-libp2p-${commitSha}: nim-libp2p-${commitSha}.zip 19 | unzip -o nim-libp2p-${commitSha}.zip 20 | 21 | nim-libp2p-${commitSha}.zip: 22 | wget -O $@ "https://github.com/status-im/nim-libp2p/archive/${commitSha}.zip" 23 | 24 | .PHONY: clean all 25 | 26 | clean: 27 | rm -f main.nim 28 | rm -f image.json 29 | rm -rf nim-libp2p* 30 | -------------------------------------------------------------------------------- /transport-interop/impl/rust-chromium/.gitignore: -------------------------------------------------------------------------------- 1 | rust-libp2p-*.zip 2 | rust-libp2p-* 3 | rust-libp2p-*/* 4 | image.json 5 | chromium-image.json 6 | -------------------------------------------------------------------------------- /transport-interop/impl/rust-chromium/v0.53/Makefile: -------------------------------------------------------------------------------- 1 | image_name := rust-chromium-v0.53 2 | commitSha := b7914e407da34c99fb76dcc300b3d44b9af97fac 3 | 4 | all: image.json 5 | 6 | image.json: rust-libp2p-${commitSha} 7 | cd rust-libp2p-${commitSha} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f interop-tests/Dockerfile.chromium . 8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip 12 | unzip -o rust-libp2p-${commitSha}.zip 13 | 14 | rust-libp2p-${commitSha}.zip: 15 | wget -O $@ "https://github.com/libp2p/rust-libp2p/archive/${commitSha}.zip" 16 | 17 | clean: 18 | rm image.json 19 | rm rust-libp2p-*.zip 20 | rm -rf rust-libp2p-* 21 | -------------------------------------------------------------------------------- /transport-interop/impl/rust-chromium/v0.54/Makefile: -------------------------------------------------------------------------------- 1 | image_name := rust-chromium-v0.54 2 | commitSha := d7beb55f672dce54017fa4b30f67ecb8d66b9810 3 | 4 | all: image.json 5 | 6 | image.json: rust-libp2p-${commitSha} 7 | cd rust-libp2p-${commitSha} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f interop-tests/Dockerfile.chromium . 
8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip 12 | unzip -o rust-libp2p-${commitSha}.zip 13 | 14 | rust-libp2p-${commitSha}.zip: 15 | wget -O $@ "https://github.com/libp2p/rust-libp2p/archive/${commitSha}.zip" 16 | 17 | clean: 18 | rm image.json 19 | rm rust-libp2p-*.zip 20 | rm -rf rust-libp2p-* 21 | -------------------------------------------------------------------------------- /transport-interop/impl/rust/.gitignore: -------------------------------------------------------------------------------- 1 | rust-libp2p-*.zip 2 | rust-libp2p-* 3 | rust-libp2p-*/* 4 | image.json 5 | chromium-image.json 6 | -------------------------------------------------------------------------------- /transport-interop/impl/rust/v0.53/Makefile: -------------------------------------------------------------------------------- 1 | image_name := rust-v0.53 2 | commitSha := b7914e407da34c99fb76dcc300b3d44b9af97fac 3 | 4 | all: image.json 5 | 6 | image.json: rust-libp2p-${commitSha} 7 | cd rust-libp2p-${commitSha} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f interop-tests/Dockerfile.native . 8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip 12 | unzip -o rust-libp2p-${commitSha}.zip 13 | 14 | rust-libp2p-${commitSha}.zip: 15 | wget -O $@ "https://github.com/libp2p/rust-libp2p/archive/${commitSha}.zip" 16 | 17 | clean: 18 | rm image.json 19 | rm rust-libp2p-*.zip 20 | rm -rf rust-libp2p-* 21 | -------------------------------------------------------------------------------- /transport-interop/impl/rust/v0.54/Makefile: -------------------------------------------------------------------------------- 1 | image_name := rust-v0.54 2 | commitSha := d7beb55f672dce54017fa4b30f67ecb8d66b9810 3 | 4 | all: image.json 5 | 6 | image.json: rust-libp2p-${commitSha} 7 | cd rust-libp2p-${commitSha} && IMAGE_NAME=${image_name} ../../../../dockerBuildWrapper.sh -f interop-tests/Dockerfile.native . 8 | docker image inspect ${image_name} -f "{{.Id}}" | \ 9 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 10 | 11 | rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip 12 | unzip -o rust-libp2p-${commitSha}.zip 13 | 14 | rust-libp2p-${commitSha}.zip: 15 | wget -O $@ "https://github.com/libp2p/rust-libp2p/archive/${commitSha}.zip" 16 | 17 | clean: 18 | rm image.json 19 | rm rust-libp2p-*.zip 20 | rm -rf rust-libp2p-* 21 | -------------------------------------------------------------------------------- /transport-interop/impl/zig/.gitignore: -------------------------------------------------------------------------------- 1 | zig-libp2p-* 2 | -------------------------------------------------------------------------------- /transport-interop/impl/zig/v0.0.1/Makefile: -------------------------------------------------------------------------------- 1 | image_name := zig-v0.0.1 2 | commitSha := d4a679ee48acae25b5c55c91918f89dec1b78e85 3 | 4 | all: image.json print-cpu-info 5 | 6 | print-cpu-info: image.json 7 | docker run --rm --entrypoint /app/zig/bin/zig $$(jq -r .imageID image.json) build-exe --show-builtin 8 | 9 | image.json: 10 | wget -O zig-libp2p-${commitSha}.zip "https://github.com/marcopolo/zig-libp2p/archive/${commitSha}.zip" 11 | unzip -o zig-libp2p-${commitSha}.zip 12 | cd zig-libp2p-${commitSha} && docker build -t ${image_name} -f interop/Dockerfile . 
13 | docker image inspect ${image_name} -f "{{.Id}}" | \ 14 | xargs -I {} echo "{\"imageID\": \"{}\"}" > $@ 15 | 16 | clean: 17 | rm -rf image.json zig-libp2p-*.zip zig-libp2p-* 18 | 19 | .PHONY: all clean print-cpu-info 20 | -------------------------------------------------------------------------------- /transport-interop/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@libp2p/transport-interop", 3 | "version": "0.0.1", 4 | "description": "Tests libp2p implementations against each other across various dimensions.", 5 | "main": "testplans.ts", 6 | "scripts": { 7 | "test": "ts-node src/compose-stdout-helper.ts && ts-node testplans.ts", 8 | "renderResults": "ts-node renderResults.ts", 9 | "cache": "ts-node helpers/cache.ts" 10 | }, 11 | "author": "marcopolo", 12 | "license": "MIT", 13 | "devDependencies": { 14 | "ts-node": "^10.9.1", 15 | "typescript": "^4.9.3" 16 | }, 17 | "dependencies": { 18 | "@types/yargs": "^17.0.19", 19 | "csv-parse": "^5.3.3", 20 | "csv-stringify": "^6.2.3", 21 | "ignore": "^5.2.4", 22 | "json-schema-to-typescript": "^11.0.2", 23 | "sqlite": "^4.1.2", 24 | "sqlite3": "^5.1.2", 25 | "yaml": "^2.2.1", 26 | "yargs": "^17.6.2" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /transport-interop/renderResults.ts: -------------------------------------------------------------------------------- 1 | import { generateTable, load, markdownTable } from './src/lib' 2 | 3 | // Read results.csv 4 | export async function render() { 5 | const runs = load("results.csv") 6 | 7 | const regex = /(?<implA>.+) x (?<implB>.+) \((?<options>.*)\)/ 8 | const parsedRuns = runs.map(run => { 9 | const match = run.name.match(regex) 10 | if (!match || match.groups === undefined) { 11 | throw new Error(`Run ID ${run.name} does not match the expected format`); 12 | } 13 | return { 14 | ...run, 15 | implA: match.groups.implA, 16 | implB: match.groups.implB, 17 | options: match.groups.options.split(",").map(option => option.replace("_", " ").trim()), 18 | } 19 | }) 20 | 21 | // Group by options 22 | const runsByOptions = parsedRuns.reduce((acc: { [key: string]: any }, run) => { 23 | acc[JSON.stringify(run.options)] = [...acc[JSON.stringify(run.options)] || [], run] 24 | return acc 25 | }, {}) 26 | 27 | let outMd = "" 28 | 29 | for (const runGroup of Object.values(runsByOptions)) { 30 | outMd += `## Using: ${runGroup[0].options.join(", ")}\n` 31 | const table = generateTable(runGroup) 32 | outMd += markdownTable(table) 33 | outMd += "\n\n" 34 | } 35 | 36 | console.log(outMd) 37 | 38 | } 39 | 40 | render() 41 | 42 | -------------------------------------------------------------------------------- /transport-interop/src/compose-runner.ts: -------------------------------------------------------------------------------- 1 | import { tmpdir } from 'os' 2 | import { promises as fs } from 'fs'; 3 | import path from 'path'; 4 | import { exec as execStd } from 'child_process'; 5 | import util from 'util'; 6 | import { ComposeSpecification, PropertiesServices } from "../compose-spec/compose-spec"; 7 | import { stringify } from 'yaml'; 8 | import { dialerStdout, dialerTimings } from './compose-stdout-helper'; 9 | 10 | const exec = util.promisify(execStd); 11 | const timeoutSecs = getTimeout(); 12 | 13 | function getTimeout (): number { 14 | const timeout = parseInt(process.env.TIMEOUT, 10) 15 | 16 | if (isNaN(timeout)) { 17 | return 10 * 60 18 | } 19 | 20 | return timeout 21 | } 22 | 23 | export type RunOpts = { 24 | up: 
{ 25 | exitCodeFrom: string 26 | renewAnonVolumes?: boolean 27 | } 28 | } 29 | 30 | export type RunFailure = any 31 | 32 | export async function run(namespace: string, compose: ComposeSpecification, opts: RunOpts): Promise<RunFailure | undefined> { 33 | // sanitize namespace 34 | const sanitizedNamespace = namespace.replace(/[^a-zA-Z0-9]/g, "-") 35 | const dir = path.join(tmpdir(), "compose-runner", sanitizedNamespace) 36 | 37 | // Check if directory exists 38 | try { 39 | await fs.access(dir) 40 | await fs.rm(dir, { recursive: true, force: true }) 41 | } catch (e) { 42 | } 43 | await fs.mkdir(dir, { recursive: true }) 44 | 45 | // Create compose.yaml file 46 | // Some docker compose environments don't like the name field to have special characters 47 | const sanitizedComposeName = compose?.name.replace(/[^a-zA-Z0-9_-]/g, "_") 48 | await fs.writeFile(path.join(dir, "compose.yaml"), stringify({ ...compose, name: sanitizedComposeName })) 49 | 50 | const upFlags: Array<string> = [] 51 | if (opts.up.exitCodeFrom) { 52 | upFlags.push(`--exit-code-from=${opts.up.exitCodeFrom}`) 53 | } 54 | if (opts.up.renewAnonVolumes) { 55 | upFlags.push("--renew-anon-volumes") 56 | } 57 | 58 | try { 59 | const { stdout, stderr } = await exec(`docker compose -f ${path.join(dir, "compose.yaml")} up ${upFlags.join(" ")}`, { 60 | signal: AbortSignal.timeout(1000 * timeoutSecs) 61 | }) 62 | 63 | try { 64 | const testResultsParsed = dialerTimings(dialerStdout(stdout)) 65 | console.log("Finished:", namespace, testResultsParsed) 66 | } catch (e) { 67 | console.log("Failed to parse test results.") 68 | console.log("stdout:") 69 | console.log(stdout) 70 | console.log("") 71 | console.log("stderr:") 72 | console.log(stderr) 73 | console.log("") 74 | throw e 75 | } 76 | } catch (e: any) { 77 | console.log("Failure", e) 78 | return e 79 | } finally { 80 | try { 81 | const { stdout, stderr } = await exec(`docker compose -f ${path.join(dir, "compose.yaml")} down`); 82 | } catch (e) { 83 | console.log("Failed to compose down", e) 84 | } 85 | await fs.rm(dir, { recursive: true, force: true }) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /transport-interop/src/lib.ts: -------------------------------------------------------------------------------- 1 | import * as csv from "csv-parse/sync"; 2 | import fs from "fs"; 3 | 4 | export type ResultLine = { 5 | name: string; 6 | outcome: string; 7 | error: string; 8 | }; 9 | 10 | export type ParsedResultLine = { 11 | name: string; 12 | outcome: string; 13 | error: string; 14 | implA: string; 15 | implB: string; 16 | }; 17 | 18 | export type ResultFile = ResultLine[]; 19 | 20 | export type CellRender = (a: string, b: string, line: ResultLine) => string; 21 | 22 | /** 23 | * Called for every cell in the table. 24 | * 25 | * This is designed to let future implementers add more complex output interpretation, with nested tables, etc. 
 26 | */ 27 | export const defaultCellRender: CellRender = (a, b, line) => { 28 | let result = ":red_circle:"; 29 | 30 | if (line.outcome === "success") { 31 | result = ":green_circle:"; 32 | } 33 | 34 | if (process.env.RUN_URL) { 35 | result = `[${result}](${process.env.RUN_URL})`; 36 | } 37 | 38 | return result; 39 | }; 40 | 41 | export const load = (path: string): ResultFile => { 42 | return csv.parse(fs.readFileSync(path, "utf8"), { 43 | columns: true, 44 | skip_empty_lines: true, 45 | delimiter: ",", 46 | }) as ResultFile; 47 | }; 48 | 49 | export const save = (path: string, content: string) => { 50 | fs.writeFileSync(path, content); 51 | }; 52 | 53 | type PairOfImplementation = [string, string]; 54 | 55 | export const listUniqPairs = (pairs: PairOfImplementation[]): string[] => { 56 | const uniq = new Set<string>(); 57 | 58 | for (const [a, b] of pairs) { 59 | uniq.add(a); 60 | uniq.add(b); 61 | } 62 | 63 | return Array.from(uniq).sort(); 64 | }; 65 | 66 | export const generateEmptyMatrix = ( 67 | keys: string[], 68 | defaultValue: string 69 | ): string[][] => { 70 | const header = [" ", ...keys]; 71 | 72 | const matrix = [header]; 73 | const rowOfDefaultValues = Array(keys.length).fill(defaultValue); 74 | 75 | for (const key of keys) { 76 | const row = [key, ...rowOfDefaultValues]; 77 | matrix.push(row); 78 | } 79 | 80 | return matrix; 81 | }; 82 | 83 | export const generateTable = ( 84 | results: Array<ParsedResultLine>, 85 | defaultValue: string = ":white_circle:", 86 | testedCell: CellRender = defaultCellRender 87 | ): string[][] => { 88 | const pairs = results.map(({ implA, implB }) => [implA, implB] as PairOfImplementation); 89 | const uniqPairs = listUniqPairs(pairs); 90 | 91 | const matrix = generateEmptyMatrix(uniqPairs, defaultValue); 92 | matrix[0][0] = "⬇️ dialer 📞 \\ ➡️ listener 🎧" 93 | 94 | for (const result of results) { 95 | const { implA, implB } = result 96 | const i = uniqPairs.indexOf(implA); 97 | const j = uniqPairs.indexOf(implB); 98 | 99 | const cell = testedCell(implA, implB, result); 100 | 101 | matrix[i + 1][j + 1] = cell; 102 | } 103 | 104 | return matrix; 105 | }; 106 | 107 | export const markdownTable = (table: string[][]): string => { 108 | const wrapped = (x: string) => `| ${x} |`; 109 | 110 | const header = table[0].join(" | "); 111 | const separator = table[0].map((x) => "-".repeat(x.length)).join(" | "); 112 | 113 | const rows = table.slice(1).map((row) => row.join(" | ")); 114 | 115 | const body = [wrapped(header), wrapped(separator), ...rows.map(wrapped)].join( 116 | "\n" 117 | ); 118 | 119 | return body; 120 | }; 121 | -------------------------------------------------------------------------------- /transport-interop/testplans.ts: -------------------------------------------------------------------------------- 1 | import { buildTestSpecs } from "./src/generator" 2 | import { Version, versions } from "./versions" 3 | import { promises as fs } from "fs"; 4 | import { run, RunFailure } from "./src/compose-runner" 5 | import { stringify } from "csv-stringify/sync" 6 | import { stringify as YAMLStringify } from "yaml" 7 | import yargs from "yargs/yargs" 8 | import path from "path"; 9 | 10 | (async () => { 11 | const WorkerCount = parseInt(process.env.WORKER_COUNT || "1") 12 | const argv = await yargs(process.argv.slice(2)) 13 | .options({ 14 | 'name-filter': { 15 | description: 'Only run tests including any of these names (pipe separated)', 16 | default: "", 17 | }, 18 | 'name-ignore': { 19 | description: 'Do not run any tests including any of these names (pipe separated)', 20 | 
default: "", 21 | }, 22 | 'emit-only': { 23 | alias: 'e', 24 | description: 'Only print the compose.yaml file', 25 | default: false, 26 | type: 'boolean' 27 | }, 28 | 'extra-versions-dir': { 29 | description: 'Look for extra versions in this directory. Version files must be in json format', 30 | default: "", 31 | type: 'string' 32 | }, 33 | 'extra-version': { 34 | description: 'Paths to JSON files for additional versions to include in the test matrix', 35 | default: [], 36 | type: 'array' 37 | }, 38 | 'verbose': { 39 | description: 'Enable verbose logging', 40 | default: false, 41 | type: 'boolean' 42 | } 43 | }) 44 | .help() 45 | .version(false) 46 | .alias('help', 'h').argv; 47 | const extraVersionsDir = argv.extraVersionsDir 48 | const extraVersions: Array = [] 49 | if (extraVersionsDir !== "") { 50 | try { 51 | const files = await fs.readdir(extraVersionsDir); 52 | for (const file of files) { 53 | const contents = await fs.readFile(path.join(extraVersionsDir, file)) 54 | extraVersions.push(...JSON.parse(contents.toString())) 55 | } 56 | } catch (err) { 57 | console.error("Error reading extra versions") 58 | console.error(err); 59 | } 60 | } 61 | 62 | for (let versionPath of argv.extraVersion.filter(p => p !== "")) { 63 | const contents = await fs.readFile(versionPath); 64 | extraVersions.push(JSON.parse(contents.toString())) 65 | } 66 | 67 | const verbose: boolean = argv.verbose 68 | 69 | let nameFilter: string[] | null = null 70 | const rawNameFilter: string | undefined = argv["name-filter"] 71 | if (rawNameFilter) { 72 | if (verbose) { 73 | console.log("rawNameFilter: " + rawNameFilter) 74 | } 75 | nameFilter = rawNameFilter.split('|').map(item => item.trim()); 76 | } 77 | if (nameFilter) { 78 | console.log("Name Filters:") 79 | nameFilter.map(n => console.log("\t" + n)) 80 | } 81 | let nameIgnore: string[] | null = null 82 | const rawNameIgnore: string | undefined = argv["name-ignore"] 83 | if (rawNameIgnore) { 84 | if (verbose) { 85 | console.log("rawNameIgnore: " + rawNameIgnore) 86 | } 87 | nameIgnore = rawNameIgnore.split('|').map(item => item.trim()); 88 | } 89 | if (nameIgnore) { 90 | console.log("Name Ignores:") 91 | nameIgnore.map(n => console.log("\t" + n)) 92 | } 93 | 94 | let testSpecs = await buildTestSpecs(versions.concat(extraVersions), nameFilter, nameIgnore, verbose) 95 | 96 | if (argv["emit-only"]) { 97 | for (const testSpec of testSpecs) { 98 | console.log("## " + testSpec.name) 99 | console.log(YAMLStringify(testSpec)) 100 | console.log("\n\n") 101 | } 102 | return 103 | } 104 | 105 | console.log(`Running ${testSpecs.length} tests`) 106 | const failures: Array = [] 107 | const statuses: Array = [["name", "outcome"]] 108 | const workers = new Array(WorkerCount).fill({}).map(async () => { 109 | while (true) { 110 | const testSpec = testSpecs.pop() 111 | if (testSpec == null) { 112 | return 113 | } 114 | console.log("Running test spec: " + testSpec.name) 115 | const failure = await run(testSpec.name || "unknown test", testSpec, { up: { exitCodeFrom: "dialer", renewAnonVolumes: true }, }) 116 | if (failure != null) { 117 | failures.push(failure) 118 | statuses.push([testSpec.name || "unknown test", "failure"]) 119 | } else { 120 | statuses.push([testSpec.name || "unknown test", "success"]) 121 | } 122 | } 123 | }) 124 | await Promise.all(workers) 125 | 126 | console.log(`${failures.length} failures`, failures) 127 | await fs.writeFile("results.csv", stringify(statuses)) 128 | 129 | console.log("Run complete") 130 | })() 131 | 
-------------------------------------------------------------------------------- /transport-interop/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "resolveJsonModule": true, 4 | "esModuleInterop": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /transport-interop/versions.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs" 2 | import path from "path" 3 | 4 | export type Version = { 5 | id: string, 6 | // This can be the image ID, or a function that takes the version ID and returns the image ID. 7 | // By default it uses the canonicalImageIDLookup. 8 | containerImageID?: string | ((id: string) => string), 9 | // If defined, this will increase the timeout for tests using this version 10 | timeoutSecs?: number, 11 | transports: Array<(string | { name: string, onlyDial: boolean })>, 12 | secureChannels: string[], 13 | muxers: string[] 14 | } 15 | 16 | function canonicalImagePath(id: string): string { 17 | // Split by implementation and version 18 | const [impl, version] = id.split("-v") 19 | // Drop the patch version 20 | const [major, minor, patch] = version.split(".") 21 | let versionFolder = `v${major}.${minor}` 22 | if (major === "0" && minor === "0") { 23 | // We're still in the 0.0.x phase, so we use the patch version 24 | versionFolder = `v0.0.${patch}` 25 | } 26 | // Read the image ID from the JSON file on the filesystem 27 | return `./impl/${impl}/${versionFolder}/image.json` 28 | } 29 | 30 | // Loads the container image id for the given version id. Expects the form of 31 | // "<impl>-vX.Y.Z" or "<impl>-vX.Y" and the image id to be in the file 32 | // "./impl/<impl>/vX.Y/image.json" or "./impl/<impl>/v0.0.Z/image.json" 33 | function canonicalImageIDLookup(id: string): string { 34 | const imageIDJSON = fs.readFileSync(canonicalImagePath(id), "utf8") 35 | const imageID = JSON.parse(imageIDJSON).imageID 36 | return imageID 37 | } 38 | 39 | // Loads the container image id for the given browser version id. Expects the 40 | // form of "<browser>-<impl>-vX.Y.Z" or "<browser>-<impl>-vX.Y" and the image id to be in the file 41 | // "./impl/<impl>/vX.Y/<browser>-image.json" or "./impl/<impl>/v0.0.Z/<browser>-image.json" 42 | function browserImageIDLookup(id: string): string { 43 | const [browser, ...rest] = id.split("-") 44 | const parentDir = path.dirname(canonicalImagePath(rest.join("-"))) 45 | 46 | // Read the image ID from the JSON file on the filesystem 47 | const imageIDJSON = fs.readFileSync(path.join(parentDir, `${browser}-image.json`), "utf8") 48 | const imageID = JSON.parse(imageIDJSON).imageID 49 | return imageID 50 | } 51 | 52 | export const versions: Array<Version> = JSON.parse(fs.readFileSync(path.join(__dirname, 'versionsInput.json'), 'utf8')).map((v: Version) => { 53 | switch(v.containerImageID) { 54 | case "browser": 55 | return { ...v, containerImageID: browserImageIDLookup } 56 | case "canonical": 57 | default: 58 | return { ...v, containerImageID: canonicalImageIDLookup } 59 | } 60 | }); 61 | --------------------------------------------------------------------------------
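As a closing illustration, a small consumer-side sketch (not part of the repo) of how the `containerImageID` union above is meant to be resolved: it is either a literal image ID string or a lookup function that derives the ID from the version id. Running it assumes the relevant image.json files have already been built, since the canonical and browser lookups read them from disk.

import { versions, type Version } from './versions'

function resolveImageID (v: Version): string | undefined {
  // after the map at the bottom of versions.ts, entries from versionsInput.json
  // carry canonicalImageIDLookup or browserImageIDLookup as a function
  if (typeof v.containerImageID === 'function') {
    return v.containerImageID(v.id)
  }

  return v.containerImageID
}

for (const v of versions) {
  console.log(v.id, '->', resolveImageID(v))
}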