├── .github
│   ├── renovate.json5
│   └── workflows
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── README.md
├── THIRDPARTY.toml
├── build.rs
├── clippy.toml
├── deny.toml
├── dist-workspace.toml
├── src
│   ├── app.rs
│   ├── asyncutil
│   │   ├── mod.rs
│   │   ├── shutdown_group.rs
│   │   ├── unique_stream.rs
│   │   └── worker_nursery.rs
│   ├── config.rs
│   ├── consts.rs
│   ├── magnet.rs
│   ├── main.rs
│   ├── peer
│   │   ├── extensions.rs
│   │   ├── messages.rs
│   │   ├── mod.rs
│   │   └── msepe.rs
│   ├── torrent.rs
│   ├── tracker
│   │   ├── http.rs
│   │   ├── mod.rs
│   │   └── udp.rs
│   ├── types.rs
│   └── util.rs
└── tests
    └── cli.rs

/.github/renovate.json5:
--------------------------------------------------------------------------------
1 | {
2 |     "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 |     "extends": ["github>jwodder/renovate-config:config.json5"],
4 | }
5 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | # This file was autogenerated by dist: https://opensource.axo.dev/cargo-dist/
2 | #
3 | # Copyright 2022-2024, axodotdev
4 | # SPDX-License-Identifier: MIT or Apache-2.0
5 | #
6 | # CI that:
7 | #
8 | # * checks for a Git Tag that looks like a release
9 | # * builds artifacts with dist (archives, installers, hashes)
10 | # * uploads those artifacts to a temporary workflow zip
11 | # * on success, uploads the artifacts to a GitHub Release
12 | #
13 | # Note that the GitHub Release will be created with a generated
14 | # title/body based on your changelogs.
15 |
16 | name: Release
17 | permissions:
18 |   "contents": "write"
19 |
20 | # This task will run whenever you push a git tag that looks like a version
21 | # like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc.
22 | # Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where
23 | # PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION
24 | # must be a Cargo-style SemVer Version (must have at least major.minor.patch).
25 | #
26 | # If PACKAGE_NAME is specified, then the announcement will be for that
27 | # package (erroring out if it doesn't have the given version or isn't dist-able).
28 | #
29 | # If PACKAGE_NAME isn't specified, then the announcement will be for all
30 | # (dist-able) packages in the workspace with that version (this mode is
31 | # intended for workspaces with only one dist-able package, or with all dist-able
32 | # packages versioned/released in lockstep).
33 | #
34 | # If you push multiple tags at once, separate instances of this workflow will
35 | # spin up, creating an independent announcement for each one. However, GitHub
36 | # will hard limit this to 3 tags per commit, as it will assume more tags is a
37 | # mistake.
38 | #
39 | # If there's a prerelease-style suffix to the version, then the release(s)
40 | # will be marked as a prerelease.
41 | on:
42 |   pull_request:
43 |   push:
44 |     tags:
45 |       - '**[0-9]+.[0-9]+.[0-9]+*'
46 |
47 | jobs:
48 |   # Run 'dist plan' (or host) to determine what tasks we need to do
49 |   plan:
50 |     runs-on: "ubuntu-22.04"
51 |     outputs:
52 |       val: ${{ steps.plan.outputs.manifest }}
53 |       tag: ${{ !github.event.pull_request && github.ref_name || '' }}
54 |       tag-flag: ${{ !github.event.pull_request && format('--tag={0}', github.ref_name) || '' }}
55 |       publishing: ${{ !github.event.pull_request }}
56 |     env:
57 |       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
58 |     steps:
59 |       - uses: actions/checkout@v4
60 |         with:
61 |           submodules: recursive
62 |       - name: Install dist
63 |         # we specify bash to get pipefail; it guards against the `curl` command
64 |         # failing. otherwise `sh` won't catch that `curl` returned non-0
65 |         shell: bash
66 |         run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.28.0/cargo-dist-installer.sh | sh"
67 |       - name: Cache dist
68 |         uses: actions/upload-artifact@v4
69 |         with:
70 |           name: cargo-dist-cache
71 |           path: ~/.cargo/bin/dist
72 |       # sure would be cool if github gave us proper conditionals...
73 |       # so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
74 |       # functionality based on whether this is a pull_request, and whether it's from a fork.
75 |       # (PRs run on the *source* but secrets are usually on the *target* -- that's *good*
76 |       # but also really annoying to build CI around when it needs secrets to work right.)
77 |       - id: plan
78 |         run: |
79 |           dist ${{ (!github.event.pull_request && format('host --steps=create --tag={0}', github.ref_name)) || 'plan' }} --output-format=json > plan-dist-manifest.json
80 |           echo "dist ran successfully"
81 |           cat plan-dist-manifest.json
82 |           echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
83 |       - name: "Upload dist-manifest.json"
84 |         uses: actions/upload-artifact@v4
85 |         with:
86 |           name: artifacts-plan-dist-manifest
87 |           path: plan-dist-manifest.json
88 |
89 |   # Build and package all the platform-specific things
90 |   build-local-artifacts:
91 |     name: build-local-artifacts (${{ join(matrix.targets, ', ') }})
92 |     # Let the initial task tell us to not run (currently very blunt)
93 |     needs:
94 |       - plan
95 |     if: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix.include != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }}
96 |     strategy:
97 |       fail-fast: false
98 |       # Target platforms/runners are computed by dist in create-release.
99 |       # Each member of the matrix has the following arguments:
100 |       #
101 |       # - runner: the github runner
102 |       # - dist-args: cli flags to pass to dist
103 |       # - install-dist: expression to run to install dist on the runner
104 |       #
105 |       # Typically there will be:
106 |       # - 1 "global" task that builds universal installers
107 |       # - N "local" tasks that build each platform's binaries and platform-specific installers
108 |       matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }}
109 |     runs-on: ${{ matrix.runner }}
110 |     container: ${{ matrix.container && matrix.container.image || null }}
111 |     env:
112 |       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
113 |       BUILD_MANIFEST_NAME: target/distrib/${{ join(matrix.targets, '-') }}-dist-manifest.json
114 |     steps:
115 |       - name: enable windows longpaths
116 |         run: |
117 |           git config --global core.longpaths true
118 |       - uses: actions/checkout@v4
119 |         with:
120 |           submodules: recursive
121 |       - name: Install Rust non-interactively if not already installed
122 |         if: ${{ matrix.container }}
123 |         run: |
124 |           if ! command -v cargo > /dev/null 2>&1; then
125 |             curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
126 |             echo "$HOME/.cargo/bin" >> $GITHUB_PATH
127 |           fi
128 |       - name: Install dist
129 |         run: ${{ matrix.install_dist.run }}
130 |       # Get the dist-manifest
131 |       - name: Fetch local artifacts
132 |         uses: actions/download-artifact@v4
133 |         with:
134 |           pattern: artifacts-*
135 |           path: target/distrib/
136 |           merge-multiple: true
137 |       - name: Install dependencies
138 |         run: |
139 |           ${{ matrix.packages_install }}
140 |       - name: Build artifacts
141 |         run: |
142 |           # Actually do builds and make zips and whatnot
143 |           dist build ${{ needs.plan.outputs.tag-flag }} --print=linkage --output-format=json ${{ matrix.dist_args }} > dist-manifest.json
144 |           echo "dist ran successfully"
145 |       - id: cargo-dist
146 |         name: Post-build
147 |         # We force bash here just because github makes it really hard to get values up
148 |         # to "real" actions without writing to env-vars, and writing to env-vars has
149 |         # inconsistent syntax between shell and powershell.
150 |         shell: bash
151 |         run: |
152 |           # Parse out what we just built and upload it to scratch storage
153 |           echo "paths<<EOF" >> "$GITHUB_OUTPUT"
154 |           dist print-upload-files-from-manifest --manifest dist-manifest.json >> "$GITHUB_OUTPUT"
155 |           echo "EOF" >> "$GITHUB_OUTPUT"
156 |
157 |           cp dist-manifest.json "$BUILD_MANIFEST_NAME"
158 |       - name: "Upload artifacts"
159 |         uses: actions/upload-artifact@v4
160 |         with:
161 |           name: artifacts-build-local-${{ join(matrix.targets, '_') }}
162 |           path: |
163 |             ${{ steps.cargo-dist.outputs.paths }}
164 |             ${{ env.BUILD_MANIFEST_NAME }}
165 |
166 |   # Build and package all the platform-agnostic(ish) things
167 |   build-global-artifacts:
168 |     needs:
169 |       - plan
170 |       - build-local-artifacts
171 |     runs-on: "ubuntu-22.04"
172 |     env:
173 |       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
174 |       BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
175 |     steps:
176 |       - uses: actions/checkout@v4
177 |         with:
178 |           submodules: recursive
179 |       - name: Install cached dist
180 |         uses: actions/download-artifact@v4
181 |         with:
182 |           name: cargo-dist-cache
183 |           path: ~/.cargo/bin/
184 |       - run: chmod +x ~/.cargo/bin/dist
185 |       # Get all the local artifacts for the global tasks to use (for e.g. checksums)
186 |       - name: Fetch local artifacts
187 |         uses: actions/download-artifact@v4
188 |         with:
189 |           pattern: artifacts-*
190 |           path: target/distrib/
191 |           merge-multiple: true
192 |       - id: cargo-dist
193 |         shell: bash
194 |         run: |
195 |           dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
196 |           echo "dist ran successfully"
197 |
198 |           # Parse out what we just built and upload it to scratch storage
199 |           echo "paths<<EOF" >> "$GITHUB_OUTPUT"
200 |           jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
201 |           echo "EOF" >> "$GITHUB_OUTPUT"
202 |
203 |           cp dist-manifest.json "$BUILD_MANIFEST_NAME"
204 |       - name: "Upload artifacts"
205 |         uses: actions/upload-artifact@v4
206 |         with:
207 |           name: artifacts-build-global
208 |           path: |
209 |             ${{ steps.cargo-dist.outputs.paths }}
210 |             ${{ env.BUILD_MANIFEST_NAME }}
211 |   # Determines if we should publish/announce
212 |   host:
213 |     needs:
214 |       - plan
215 |       - build-local-artifacts
216 |       - build-global-artifacts
217 |     # Only run if we're "publishing", and only if local and global didn't fail (skipped is fine)
218 |     if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.build-local-artifacts.result == 'skipped' || needs.build-local-artifacts.result == 'success') }}
219 |     env:
220 |       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
221 |     runs-on: "ubuntu-22.04"
222 |     outputs:
223 |       val: ${{ steps.host.outputs.manifest }}
224 |     steps:
225 |       - uses: actions/checkout@v4
226 |         with:
227 |           submodules: recursive
228 |       - name: Install cached dist
229 |         uses: actions/download-artifact@v4
230 |         with:
231 |           name: cargo-dist-cache
232 |           path: ~/.cargo/bin/
233 |       - run: chmod +x ~/.cargo/bin/dist
234 |       # Fetch artifacts from scratch-storage
235 |       - name: Fetch artifacts
236 |         uses: actions/download-artifact@v4
237 |         with:
238 |           pattern: artifacts-*
239 |           path: target/distrib/
240 |           merge-multiple: true
241 |       - id: host
242 |         shell: bash
243 |         run: |
244 |           dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
245 |           echo "artifacts uploaded and released successfully"
246 |           cat dist-manifest.json
247 |           echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
248 |       - name: "Upload dist-manifest.json"
249 |         uses: actions/upload-artifact@v4
250 |         with:
251 |           # Overwrite the previous copy
252 |           name: artifacts-dist-manifest
253 |           path: dist-manifest.json
254 |       # Create a GitHub Release while uploading all files to it
255 |       - name: "Download GitHub Artifacts"
256 |         uses: actions/download-artifact@v4
257 |         with:
258 |           pattern: artifacts-*
259 |           path: artifacts
260 |           merge-multiple: true
261 |       - name: Cleanup
262 |         run: |
263 |           # Remove the granular manifests
264 |           rm -f artifacts/*-dist-manifest.json
265 |       - name: Create GitHub Release
266 |         env:
267 |           PRERELEASE_FLAG: "${{ fromJson(steps.host.outputs.manifest).announcement_is_prerelease && '--prerelease' || '' }}"
268 |           ANNOUNCEMENT_TITLE: "${{ fromJson(steps.host.outputs.manifest).announcement_title }}"
269 |           ANNOUNCEMENT_BODY: "${{ fromJson(steps.host.outputs.manifest).announcement_github_body }}"
270 |           RELEASE_COMMIT: "${{ github.sha }}"
271 |         run: |
272 |           # Write and read notes from a file to avoid quoting breaking things
273 |           echo "$ANNOUNCEMENT_BODY" > $RUNNER_TEMP/notes.txt
274 |
275 |           gh release create "${{ needs.plan.outputs.tag }}" --target "$RELEASE_COMMIT" $PRERELEASE_FLAG --title "$ANNOUNCEMENT_TITLE" --notes-file "$RUNNER_TEMP/notes.txt" artifacts/*
276 |
277 |   announce:
278 |     needs:
279 |       - plan
280 |       - host
281 |     # use "always() && ..." to allow us to wait for all publish jobs while
282 |     # still allowing individual publish jobs to skip themselves (for prereleases).
283 |     # "host" however must run to completion, no skipping allowed!
284 |     if: ${{ always() && needs.host.result == 'success' }}
285 |     runs-on: "ubuntu-22.04"
286 |     env:
287 |       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
288 |     steps:
289 |       - uses: actions/checkout@v4
290 |         with:
291 |           submodules: recursive
292 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - master
7 |   pull_request:
8 |   schedule:
9 |     - cron: '0 12 * * *'
10 |
11 | concurrency:
12 |   group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref_name }}
13 |   cancel-in-progress: true
14 |
15 | defaults:
16 |   run:
17 |     shell: bash
18 |
19 | jobs:
20 |   test:
21 |     runs-on: ${{ matrix.os }}
22 |     name: test (${{ matrix.os }}, ${{ matrix.toolchain }})
23 |     strategy:
24 |       fail-fast: false
25 |       matrix:
26 |         os:
27 |           - ubuntu-latest
28 |         toolchain:
29 |           - msrv
30 |           - stable
31 |           - beta
32 |           - nightly
33 |         include:
34 |           - os: macos-latest
35 |             toolchain: stable
36 |             exclude: --exclude-features native-tls-vendored
37 |           - os: windows-latest
38 |             toolchain: stable
39 |             exclude: --exclude-features native-tls-vendored
40 |     steps:
41 |       - name: Check out repository
42 |         uses: actions/checkout@v4
43 |
44 |       - name: Install Rust
45 |         if: matrix.toolchain != 'msrv'
46 |         uses: dtolnay/rust-toolchain@master
47 |         with:
48 |           toolchain: ${{ matrix.toolchain }}
49 |
50 |       - name: Determine minimum supported Rust version
51 |         if: matrix.toolchain == 'msrv'
52 |         id: msrv
53 |         run: |
54 |           rust_version="$(cargo metadata --no-deps --format-version=1 | jq -r '.packages[0].rust_version')"
55 |           echo "msrv=$rust_version" >> "$GITHUB_OUTPUT"
56 |
57 |       - name: Install minimum supported Rust version
58 |         if: matrix.toolchain == 'msrv'
59 |         uses: dtolnay/rust-toolchain@master
60 |         with:
61 |           toolchain: ${{ steps.msrv.outputs.msrv }}
62 |
63 |       - name: Activate cache
64 |         if: "!startsWith(github.head_ref, 'renovate/')"
65 |         uses: Swatinem/rust-cache@v2
66 |
67 |       - name: Install cargo-hack
68 |         uses: taiki-e/install-action@cargo-hack
69 |
70 |       - name: Build crate
71 |         run: |
72 |           cargo hack \
73 |             --each-feature \
74 |             --exclude-no-default-features \
75 |             --exclude-all-features ${{ matrix.exclude }} \
76 |             build --all-targets --verbose
77 |
78 |       - name: Test crate
79 |         run: |
80 |           cargo hack \
81 |             --each-feature \
82 |             --exclude-no-default-features \
83 |             --exclude-all-features ${{ matrix.exclude }} \
84 |             test --verbose
85 |
86 |   minimal-versions:
87 |     runs-on: ubuntu-latest
88 |     steps:
89 |       - name: Check out repository
90 |         uses: actions/checkout@v4
91 |
92 |       - name: Install Rust
93 |         uses: dtolnay/rust-toolchain@master
94 |         with:
95 |           toolchain: stable
96 |
97 |       - name: Install nightly Rust
98 |         # Needed by cargo-minimal-versions
99 |         run: rustup update nightly
100 |
101 |       - name: Activate cache
102 |         if: "!startsWith(github.head_ref, 'renovate/')"
103 |         uses: Swatinem/rust-cache@v2
104 |
105 |       - name: Install cargo-hack and cargo-minimal-versions
106 |         uses: taiki-e/install-action@v2
107 |         with:
108 |           tool: cargo-hack,cargo-minimal-versions
109 |
110 |       - name: Build crate
111 |         run: |
112 |           cargo minimal-versions --direct \
113 |             --each-feature \
114 |             --exclude-no-default-features \
115 |             --exclude-all-features \
116 |             build --all-targets --verbose
117 |
118 |       - name: Test crate
119 |         run: |
120 |           cargo minimal-versions --direct \
121 |             --each-feature \
122 |             --exclude-no-default-features \
123 |             --exclude-all-features \
124 |             test --verbose
125 |
126 |   coverage:
127 |     # This is separate from the main tests because cargo-llvm-cov doesn't run
128 |     # doctests.
129 |     runs-on: ubuntu-latest
130 |     steps:
131 |       - name: Check out repository
132 |         uses: actions/checkout@v4
133 |
134 |       - name: Install Rust
135 |         uses: dtolnay/rust-toolchain@master
136 |         with:
137 |           toolchain: stable
138 |           components: llvm-tools
139 |
140 |       - name: Activate cache
141 |         if: "!startsWith(github.head_ref, 'renovate/')"
142 |         uses: Swatinem/rust-cache@v2
143 |
144 |       - name: Install cargo-llvm-cov
145 |         uses: taiki-e/install-action@v2
146 |         with:
147 |           tool: cargo-llvm-cov
148 |
149 |       - name: Test with coverage
150 |         run: cargo llvm-cov --workspace --all-features --lcov --output-path lcov.info
151 |
152 |       - name: Upload coverage to Codecov
153 |         uses: codecov/codecov-action@v5
154 |         with:
155 |           files: lcov.info
156 |           fail_ci_if_error: false
157 |           token: ${{ secrets.CODECOV_TOKEN }}
158 |
159 |   lint:
160 |     runs-on: ubuntu-latest
161 |     steps:
162 |       - name: Check out repository
163 |         uses: actions/checkout@v4
164 |
165 |       - name: Install Rust
166 |         uses: dtolnay/rust-toolchain@master
167 |         with:
168 |           toolchain: stable
169 |           components: clippy, rustfmt
170 |
171 |       - name: Activate cache
172 |         if: "!startsWith(github.head_ref, 'renovate/')"
173 |         uses: Swatinem/rust-cache@v2
174 |
175 |       - name: Install cargo-hack
176 |         uses: taiki-e/install-action@cargo-hack
177 |
178 |       - name: Check code
179 |         run: cargo hack --workspace --feature-powerset clippy -- -Dwarnings
180 |
181 |       - name: Check tests & examples
182 |         run: cargo hack --workspace --feature-powerset clippy --tests --examples -- -Dwarnings
183 |
184 |       - name: Check formatting
185 |         run: cargo fmt --all --check
186 |
187 |   deny:
188 |     runs-on: ubuntu-latest
189 |     steps:
190 |       - name: Check out repository
191 |         uses: actions/checkout@v4
192 |
193 |       - name: Run `cargo deny check`
194 |         uses: EmbarkStudios/cargo-deny-action@v2
195 |
196 |   thirdparty:
197 |     runs-on: ubuntu-latest
198 |     steps:
199 |       - name: Check out repository
200 |         uses: actions/checkout@v4
201 |
202 |       # This is necessary as of 2025-02-23 because cargo seems to have changed
203 |       # its algorithm for the digest in registry paths in 1.85, yet GitHub
204 |       # Actions uses an older version of rust by default, leading to mismatches
205 |       # for crates like `ring` that don't specify "license" metadata.
206 |       - name: Install latest stable Rust
207 |         uses: dtolnay/rust-toolchain@master
208 |         with:
209 |           toolchain: stable
210 |
211 |       - name: Install cargo-bundle-licenses
212 |         uses: taiki-e/cache-cargo-install-action@v2
213 |         with:
214 |           tool: cargo-bundle-licenses
215 |           locked: false  # Ensure we get the latest version of the spdx crate
216 |
217 |       - name: Check that third party licenses haven't changed
218 |         run: |
219 |           cargo bundle-licenses --check-previous \
220 |             --features rustls \
221 |             -f toml -o CI.toml -p THIRDPARTY.toml
222 |
223 | # vim:set et sts=2:
224 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 |   - repo: https://github.com/pre-commit/pre-commit-hooks
3 |     rev: v4.4.0
4 |     hooks:
5 |       - id: check-added-large-files
6 |         exclude: THIRDPARTY.toml
7 |       - id: check-json
8 |       - id: check-toml
9 |       - id: check-yaml
10 |       - id: end-of-file-fixer
11 |       - id: trailing-whitespace
12 |         exclude: THIRDPARTY.toml
13 |
14 |   - repo: https://github.com/doublify/pre-commit-rust
15 |     rev: v1.0
16 |     hooks:
17 |       - id: clippy
18 |         args: ["--all-features", "--all-targets"]
19 |       - id: fmt
20 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | v0.5.0 (2025-05-20)
2 | -------------------
3 | - Fix "Saving torrent to file" message when torrent is actually being written
4 |   to stdout
5 | - Added support for configuration files
6 | - Added support for MSE/PE-encrypted peer connections
7 |
8 | v0.4.0 (2025-05-17)
9 | -------------------
10 | - Increased MSRV to 1.82
11 | - Linux release artifacts are now built on Ubuntu 22.04 (up from Ubuntu 20.04),
12 |   which may result in a more recent glibc being required
13 | - Added a `--json` option to `query-tracker`
14 |
15 | v0.3.1 (2025-02-23)
16 | -------------------
17 | - Fix license bundle distributed with release assets
18 |
19 | v0.3.0 (2025-01-30)
20 | -------------------
21 | - Publicly expose & document the `query-tracker` and `query-peer` subcommands
22 | - Add `native-tls`, `native-tls-vendored`, and `rustls` features
23 |
24 | v0.2.1 (2024-12-13)
25 | -------------------
26 | - Increased MSRV to 1.74
27 | - Fixed build error due to changes in linting
28 |
29 | v0.2.0 (2023-12-29)
30 | -------------------
31 | - Increased MSRV to 1.70
32 | - "Error communicating with {tracker}" warning messages now include the display
33 |   name of the corresponding magnet, if known
34 | - Set the "yourip" field in outgoing BEP 10 handshakes, and log the field in
35 |   received BEP 10 handshakes
36 | - If the first trackers to return provide a large number of peers, don't stop
37 |   polling the futures for the remaining trackers
38 |
39 | v0.1.0 (2023-06-24)
40 | -------------------
41 | Initial release
42 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "demagnetize"
3 | version = "0.5.0"
4 | edition = "2021"
5 | rust-version = "1.82"
6 | description = "Convert magnet links to .torrent files"
7 | authors = ["John Thorvald Wodder II"]
8 | repository = "https://github.com/jwodder/demagnetize-rs"
9 | documentation = "https://github.com/jwodder/demagnetize-rs"
10 | license = "MIT"
11 | keywords = ["bittorrent", "magnet-link", "torrent"]
12 | categories = ["command-line-utilities", "network-programming"]
13 | exclude = ["/.*"]
14 |
15 | [dependencies]
16 | anstream = "0.6.14"
17 | anstyle = "1.0.7"
18 | async-channel = "2.3.1"
19 | bendy = "0.3.3"
20 | bytes = "1.7.0"
21 | chrono = { version = "0.4.38", default-features = false, features = ["clock", "std"] }
22 | clap = { version = "4.5.4", default-features = false, features = ["derive", "error-context", "help", "std", "suggestions", "usage", "wrap_help"] }
23 | data-encoding = "2.6.0"
24 | dirs = "6.0.0"
25 | fern = "0.7.0"
26 | futures-util = { version = "0.3.30", default-features = false, features = ["sink"] }
27 | generic-array = "0.14.7"
28 | log = "0.4.21"
29 | num-bigint = "0.4.6"
30 | num_enum = "0.7.3"
31 | patharg = { version = "0.4.0", features = ["tokio"] }
32 | pin-project-lite = "0.2.14"
33 | rand = "0.9.0"
34 | rc4 = "0.1.0"
35 | reqwest = { version = "0.12.4", default-features = false, features = ["http2"] }
36 | serde = { version = "1.0.219", features = ["derive"] }
37 | sha1 = "0.10.6"
38 | strum = "0.27.0"
39 | strum_macros = "0.27.0"
40 | thiserror = "2.0.0"
41 | tokio = { version = "1.37.0", features = ["fs", "io-util", "macros", "sync", "rt", "rt-multi-thread", "time", "net"] }
42 | tokio-util = { version = "0.7.11", features = ["codec", "rt"] }
43 | toml = "0.8.22"
44 | url = "2.5.0"
45 |
46 | [build-dependencies]
47 | thiserror = "2.0.0"
48 |
49 | [dev-dependencies]
50 | assert_cmd = "2.0.14"
51 | rand_chacha = "0.9.0"
52 | rstest = { version = "0.25.0", default-features = false }
53 | tempfile = "3.10.1"
54 |
55 | [features]
56 | default = ["native-tls"]
57 | native-tls = ["reqwest/native-tls"]
58 | native-tls-vendored = ["reqwest/native-tls-vendored"]
59 | rustls = [
60 |     "reqwest/rustls-tls",
61 |     "reqwest/rustls-tls-native-roots",
62 |     "reqwest/rustls-tls-webpki-roots",
63 | ]
64 |
65 | [profile.dist]
66 | inherits = "release"
67 | lto = "thin"
68 |
69 | [lints.rust]
70 | # Lint groups:
71 | deprecated_safe = { level = "deny", priority = -1 }
72 | future_incompatible = { level = "deny", priority = -1 }
73 | refining_impl_trait = { level = "deny", priority = -1 }
74 | rust_2018_idioms = { level = "deny", priority = -1 }
75 | unused = { level = "deny", priority = -1 }
76 |
77 | # Set a lint in "unused" back to "warn" (to be denied again under CI):
78 | dead_code = "warn"
79 |
80 | # Deny various allow-by-default lints:
81 | macro_use_extern_crate = "deny"
82 | missing_debug_implementations = "deny"
83 | redundant_lifetimes = "deny"
84 | single_use_lifetimes = "deny"
85 | trivial_casts = "deny"
86 | unit_bindings = "deny"
87 | unnameable_types = "deny"
88 | unreachable_pub = "deny"
89 | unsafe_code = "deny"
90 | unsafe_op_in_unsafe_fn = "deny"
91 | unused_import_braces = "deny"
92 | unused_lifetimes = "deny"
93 | unused_qualifications = "deny"
94 |
95 | # Deny various warn-by-default lints:
96 | ambiguous_glob_reexports = "deny"
"deny" 97 | break_with_label_and_loop = "deny" 98 | confusable_idents = "deny" 99 | const_item_mutation = "deny" 100 | deprecated = "deny" 101 | deprecated_where_clause_location = "deny" 102 | drop_bounds = "deny" 103 | dropping_copy_types = "deny" 104 | dropping_references = "deny" 105 | duplicate_macro_attributes = "deny" 106 | dyn_drop = "deny" 107 | exported_private_dependencies = "deny" 108 | for_loops_over_fallibles = "deny" 109 | forgetting_copy_types = "deny" 110 | forgetting_references = "deny" 111 | function_item_references = "deny" 112 | hidden_glob_reexports = "deny" 113 | inline_no_sanitize = "deny" 114 | internal_features = "deny" 115 | invalid_from_utf8 = "deny" 116 | invalid_macro_export_arguments = "deny" 117 | invalid_nan_comparisons = "deny" 118 | invalid_value = "deny" 119 | irrefutable_let_patterns = "deny" 120 | mixed_script_confusables = "deny" 121 | named_arguments_used_positionally = "deny" 122 | non_contiguous_range_endpoints = "deny" 123 | non_fmt_panics = "deny" 124 | non_shorthand_field_patterns = "deny" 125 | noop_method_call = "deny" 126 | opaque_hidden_inferred_bound = "deny" 127 | overlapping_range_endpoints = "deny" 128 | private_bounds = "deny" 129 | private_interfaces = "deny" 130 | special_module_name = "deny" 131 | stable_features = "deny" 132 | static_mut_refs = "deny" 133 | suspicious_double_ref_op = "deny" 134 | trivial_bounds = "deny" 135 | type_alias_bounds = "deny" 136 | unconditional_recursion = "deny" 137 | unexpected_cfgs = "deny" 138 | ungated_async_fn_track_caller = "deny" 139 | unused_associated_type_bounds = "deny" 140 | unused_comparisons = "deny" 141 | while_true = "deny" 142 | 143 | [lints.clippy] 144 | # Deny all warn-by-default lints: 145 | all = { level = "deny", priority = -1 } 146 | 147 | # Warn on various allow-by-default lints (denied under CI): 148 | dbg_macro = "warn" 149 | todo = "warn" 150 | unimplemented = "warn" 151 | 152 | # Deny various allow-by-default lints: 153 | as_underscore = "deny" 154 | bool_to_int_with_if = "deny" 155 | cast_possible_truncation = "deny" 156 | cast_possible_wrap = "deny" 157 | cast_precision_loss = "deny" 158 | cast_sign_loss = "deny" 159 | cfg_not_test = "deny" 160 | checked_conversions = "deny" 161 | clear_with_drain = "deny" 162 | cloned_instead_of_copied = "deny" 163 | collection_is_never_read = "deny" 164 | comparison_chain = "deny" 165 | copy_iterator = "deny" 166 | debug_assert_with_mut_call = "deny" 167 | default_trait_access = "deny" 168 | derive_partial_eq_without_eq = "deny" 169 | doc_link_with_quotes = "deny" 170 | doc_markdown = "deny" 171 | empty_enum_variants_with_brackets = "deny" 172 | empty_structs_with_brackets = "deny" 173 | equatable_if_let = "deny" 174 | exit = "deny" 175 | explicit_into_iter_loop = "deny" 176 | explicit_iter_loop = "deny" 177 | filter_map_next = "deny" 178 | flat_map_option = "deny" 179 | float_cmp = "deny" 180 | float_cmp_const = "deny" 181 | fn_to_numeric_cast_any = "deny" 182 | format_collect = "deny" 183 | format_push_string = "deny" 184 | future_not_send = "deny" 185 | get_unwrap = "deny" 186 | if_then_some_else_none = "deny" 187 | ignored_unit_patterns = "deny" 188 | impl_trait_in_params = "deny" 189 | implicit_clone = "deny" 190 | imprecise_flops = "deny" 191 | index_refutable_slice = "deny" 192 | infinite_loop = "deny" 193 | into_iter_without_iter = "deny" 194 | invalid_upcast_comparisons = "deny" 195 | items_after_statements = "deny" 196 | iter_filter_is_ok = "deny" 197 | iter_filter_is_some = "deny" 198 | iter_not_returning_iterator = "deny" 199 | 
iter_with_drain = "deny" 200 | large_futures = "deny" 201 | large_types_passed_by_value = "deny" 202 | linkedlist = "deny" 203 | lossy_float_literal = "deny" 204 | manual_assert = "deny" 205 | manual_instant_elapsed = "deny" 206 | manual_is_power_of_two = "deny" 207 | manual_is_variant_and = "deny" 208 | manual_let_else = "deny" 209 | manual_midpoint = "deny" 210 | manual_string_new = "deny" 211 | map_unwrap_or = "deny" 212 | map_with_unused_argument_over_ranges = "deny" 213 | match_bool = "deny" 214 | match_wild_err_arm = "deny" 215 | mem_forget = "deny" 216 | mismatching_type_param_order = "deny" 217 | missing_assert_message = "deny" 218 | missing_asserts_for_indexing = "deny" 219 | missing_panics_doc = "deny" 220 | missing_safety_doc = "deny" 221 | mixed_read_write_in_expression = "deny" 222 | mut_mut = "deny" 223 | needless_bitwise_bool = "deny" 224 | needless_collect = "deny" 225 | needless_for_each = "deny" 226 | needless_pass_by_ref_mut = "deny" 227 | needless_raw_string_hashes = "deny" 228 | needless_raw_strings = "deny" 229 | negative_feature_names = "deny" 230 | non_zero_suggestions = "deny" 231 | option_as_ref_cloned = "deny" 232 | or_fun_call = "deny" 233 | path_buf_push_overwrite = "deny" 234 | pathbuf_init_then_push = "deny" 235 | precedence_bits = "deny" 236 | pub_underscore_fields = "deny" 237 | pub_without_shorthand = "deny" 238 | range_minus_one = "deny" 239 | range_plus_one = "deny" 240 | rc_buffer = "deny" 241 | rc_mutex = "deny" 242 | read_zero_byte_vec = "deny" 243 | redundant_clone = "deny" 244 | redundant_closure_for_method_calls = "deny" 245 | redundant_feature_names = "deny" 246 | ref_binding_to_reference = "deny" 247 | rest_pat_in_fully_bound_structs = "deny" 248 | return_and_then = "deny" 249 | same_functions_in_if_condition = "deny" 250 | semicolon_if_nothing_returned = "deny" 251 | set_contains_or_insert = "deny" 252 | should_panic_without_expect = "deny" 253 | single_char_pattern = "deny" 254 | single_match_else = "deny" 255 | stable_sort_primitive = "deny" 256 | str_split_at_newline = "deny" 257 | string_add_assign = "deny" 258 | string_lit_as_bytes = "deny" 259 | string_lit_chars_any = "deny" 260 | string_to_string = "deny" 261 | struct_field_names = "deny" 262 | suboptimal_flops = "deny" 263 | trailing_empty_array = "deny" 264 | trait_duplication_in_bounds = "deny" 265 | trivial_regex = "deny" 266 | try_err = "deny" 267 | type_repetition_in_bounds = "deny" 268 | unchecked_duration_subtraction = "deny" 269 | undocumented_unsafe_blocks = "deny" 270 | unicode_not_nfc = "deny" 271 | uninhabited_references = "deny" 272 | uninlined_format_args = "deny" 273 | unnecessary_join = "deny" 274 | unnecessary_literal_bound = "deny" 275 | unnecessary_safety_comment = "deny" 276 | unnecessary_safety_doc = "deny" 277 | unnecessary_self_imports = "deny" 278 | unnecessary_struct_initialization = "deny" 279 | unnecessary_wraps = "deny" 280 | unneeded_field_pattern = "deny" 281 | unnested_or_patterns = "deny" 282 | unused_async = "deny" 283 | unused_peekable = "deny" 284 | unused_result_ok = "deny" 285 | unused_rounding = "deny" 286 | unwrap_used = "deny" 287 | used_underscore_binding = "deny" 288 | used_underscore_items = "deny" 289 | while_float = "deny" 290 | wildcard_dependencies = "deny" 291 | zero_sized_map_values = "deny" 292 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2023-2025 John Thorvald 
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active)
2 | [![CI Status](https://github.com/jwodder/demagnetize-rs/actions/workflows/test.yml/badge.svg)](https://github.com/jwodder/demagnetize-rs/actions/workflows/test.yml)
3 | [![codecov.io](https://codecov.io/gh/jwodder/demagnetize-rs/branch/master/graph/badge.svg)](https://codecov.io/gh/jwodder/demagnetize-rs)
4 | [![Minimum Supported Rust Version](https://img.shields.io/badge/MSRV-1.82-orange)](https://www.rust-lang.org)
5 | [![MIT License](https://img.shields.io/github/license/jwodder/demagnetize-rs.svg)](https://opensource.org/licenses/MIT)
6 |
7 | [GitHub](https://github.com/jwodder/demagnetize-rs) | [crates.io](https://crates.io/crates/demagnetize) | [Issues](https://github.com/jwodder/demagnetize-rs/issues) | [Changelog](https://github.com/jwodder/demagnetize-rs/blob/master/CHANGELOG.md)
8 |
9 | `demagnetize` is a Rust program for converting one or more BitTorrent [magnet
10 | links](https://en.wikipedia.org/wiki/Magnet_URI_scheme) into `.torrent` files
11 | by downloading the torrent info from active peers.
12 |
13 | At the moment, `demagnetize` only supports basic features of the BitTorrent
14 | protocol. The following notable features are supported:
15 |
16 | - BitTorrent protocol v1
17 | - HTTP (including compact and IPv6 extensions) and UDP trackers
18 | - magnet URIs with info hashes encoded in either hexadecimal or base32
19 | - Fast extension ([BEP 6](https://www.bittorrent.org/beps/bep_0006.html))
20 | - UDP tracker protocol extensions ([BEP
21 |   41](https://www.bittorrent.org/beps/bep_0041.html))
22 | - MSE/PE Encryption
23 |
24 | The following features are not currently supported but are planned, in no
25 | particular order:
26 |
27 | - Distributed hash tables
28 | - BitTorrent protocol v2
29 | - `x.pe` parameters in magnet links
30 | - uTP
31 |
32 | `demagnetize` is a translation of a Python program by the same author; you can
33 | find the Python version at <https://github.com/jwodder/demagnetize>.
34 |
35 |
36 | Installation
37 | ============
38 |
39 | Release Assets
40 | --------------
41 |
42 | Prebuilt binaries for the most common platforms are available as GitHub release
43 | assets. [The page for the latest
44 | release](https://github.com/jwodder/demagnetize-rs/releases/latest) lists these
45 | under "Assets", along with installer scripts for both Unix-like systems and
46 | Windows.
47 |
48 | As an alternative to the installer scripts, if you have
49 | [`cargo-binstall`](https://github.com/cargo-bins/cargo-binstall) on your
50 | system, you can use it to download & install the appropriate release asset for
51 | your system for the latest version of `demagnetize` by running `cargo binstall
52 | demagnetize`.
53 |
54 | Installing from Source
55 | ----------------------
56 |
57 | If you have [Rust and Cargo
58 | installed](https://www.rust-lang.org/tools/install), you can build the latest
59 | release of `demagnetize` from source and install it in `~/.cargo/bin` by
60 | running:
61 |
62 |     cargo install demagnetize
63 |
64 | `demagnetize` has the following Cargo features, selectable via the `--features
65 | <features>` option to `cargo install`:
66 |
67 | - `native-tls` — Use [`native-tls`](https://github.com/sfackler/rust-native-tls)
68 |   for TLS support. This feature is enabled by default.
69 |
70 | - `native-tls-vendored` — Like `native-tls`, but compile a vendored copy of
71 |   OpenSSL into `demagnetize` instead of using the platform's copy at runtime.
72 |   This makes it possible to build `demagnetize` on one system and run it on
73 |   another system that has a different version of OpenSSL.
74 |
75 |   This feature has no effect on Windows and macOS, where OpenSSL is not used.
76 |
77 | - `rustls` — Use [`rustls`](https://github.com/rustls/rustls) for TLS support.
78 |   When selecting this feature, be sure to also supply the
79 |   `--no-default-features` option in order to disable `native-tls`.
80 |
81 |   - The release assets are built using this feature.
82 |
83 |
84 | Usage
85 | =====
86 |
87 |     demagnetize [<global options>] <subcommand> ...
88 |
89 | The `demagnetize` command has two main general-purpose subcommands, `get` (for
90 | converting a single magnet link) and `batch` (for converting a file of magnet
91 | links). There are also two low-level commands, `query-tracker` (for getting a
92 | list of peers from a single tracker) and `query-peer` (for getting torrent
93 | metadata from a single peer).
94 |
95 | Global Options
96 | --------------
97 |
98 | - `-c <file>`, `--config <file>` — Specify the configuration file to use. See
99 |   "Configuration" below for the default config file location.
100 |
101 | - `-l <level>`, `--log-level <level>` — Set the log level to the given value.
102 |   Possible values are "`OFF`", "`ERROR`", "`WARN`", "`INFO`", "`DEBUG`", and
103 |   "`TRACE`" (all case-insensitive). [default value: `INFO`]
104 |
105 | - `--no-config` — Use the default configuration settings and do not read from
106 |   any configuration files
107 |
108 |
109 | `demagnetize get`
110 | -----------------
111 |
112 |     demagnetize [<global options>] get [<options>] <magnet-link>
113 |
114 | Convert a single magnet link specified on the command line to a `.torrent`
115 | file. (Note that you will likely have to quote the link in order to prevent it
116 | from being interpreted by the shell.) By default, the file is saved at
117 | `{name}.torrent`, where `{name}` is replaced by the value of the `name` field
118 | from the torrent info, but a different path can be set via the `--outfile`
119 | option.
120 |
121 | ### Options
122 |
123 | - `-o PATH`, `--outfile PATH` — Save the `.torrent` file to the given path.
124 |   The path may contain a `{name}` placeholder, which will be replaced by the
125 |   (sanitized) name of the torrent, and/or a `{hash}` placeholder, which will be
126 |   replaced by the torrent's info hash in hexadecimal. Specifying `-` will
127 |   cause the torrent to be written to standard output. [default:
128 |   `{name}.torrent`]
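As a concrete example (the magnet link below is hypothetical; its info hash is
a made-up placeholder, not a real torrent):

    demagnetize get 'magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567&dn=example'
    demagnetize get -o '{hash}.torrent' 'magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567'

The first invocation would write `example.torrent` (assuming the torrent
info's `name` field is `example`); the second would name the output file after
the info hash instead.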
129 |
130 |
131 | `demagnetize batch`
132 | -------------------
133 |
134 |     demagnetize [<global options>] batch [<options>] <file>
135 |
136 | Read magnet links from `<file>` (or from standard input if `<file>` is `-`),
137 | one per line (ignoring empty lines and lines that start with `#`), and convert
138 | each one to a `.torrent` file. By default, each file is saved at
139 | `{name}.torrent`, where `{name}` is replaced by the value of the `name` field
140 | from the torrent info, but a different path can be set via the `--outfile`
141 | option.
142 |
143 | ### Options
144 |
145 | - `-o PATH`, `--outfile PATH` — Save the `.torrent` files to the given path.
146 |   The path may contain a `{name}` placeholder, which will be replaced by the
147 |   (sanitized) name of each torrent, and/or a `{hash}` placeholder, which will
148 |   be replaced by each torrent's info hash in hexadecimal. [default:
149 |   `{name}.torrent`]
150 |
151 |
152 | `demagnetize query-tracker`
153 | ---------------------------
154 |
155 |     demagnetize [<global options>] query-tracker [<options>] <tracker> <info-hash>
156 |
157 | Query the given tracker (specified as an HTTP or UDP URL) for peers serving the
158 | torrent with the given info hash (specified as a 40-character hex string or
159 | 32-character base32 string), and print out the retrieved peers' addresses
160 | in the form "IP:PORT".
161 |
162 | ### Options
163 |
164 | - `-J`, `--json` — Print out the peers as JSON objects, one per line
165 |
166 | - `--no-crypto` — Do not tell the tracker anything about our encryption
167 |   support. Overrides the `general.encrypt` configuration setting.
168 |
169 | - `--require-crypto` — Tell the tracker that we require peers with encryption
170 |   support. Overrides the `general.encrypt` configuration setting.
171 |
172 | - `--support-crypto` — Tell the tracker that we support the encrypted peer
173 |   protocol. Overrides the `general.encrypt` configuration setting.
174 |
175 |
176 | `demagnetize query-peer`
177 | ------------------------
178 |
179 |     demagnetize [<global options>] query-peer [<options>] <peer> <info-hash>
180 |
181 | Query the given peer (specified as an address in "IPv4:PORT" or "[IPv6]:PORT"
182 | format) for the metadata of the torrent with the given info hash (specified as
183 | a 40-character hex string or 32-character base32 string), and save the metadata
184 | to a file. By default, the file is saved at `{name}.torrent`, where `{name}`
185 | is replaced by the value of the `name` field from the torrent info, but a
186 | different path can be set via the `--outfile` option.
187 |
188 | Note that, unlike the `.torrent` files produced by the `get` and `batch`
189 | commands, the files produced by this command will not contain tracker
190 | information.
191 |
192 | ### Options
193 |
194 | - `--encrypt` — Create an encrypted connection to the peer. Overrides the
195 |   `general.encrypt` configuration setting.
196 |
197 | - `--no-encrypt` — Create an unencrypted connection to the peer. Overrides the
198 |   `general.encrypt` configuration setting.
199 |
200 | - `-o PATH`, `--outfile PATH` — Save the `.torrent` file to the given path.
201 |   The path may contain a `{name}` placeholder, which will be replaced by the
202 |   (sanitized) name of the torrent, and/or a `{hash}` placeholder, which will be
203 |   replaced by the torrent's info hash in hexadecimal. Specifying `-` will
204 |   cause the torrent to be written to standard output. [default:
205 |   `{name}.torrent`]
206 |
207 | - `--prefer-encrypt` — Attempt to create an encrypted connection to the peer;
208 |   if that fails, try again without encryption. Overrides the `general.encrypt`
209 |   configuration setting.
210 |
211 |
212 | Configuration
213 | =============
214 |
215 | `demagnetize` can be configured via a [TOML](https://toml.io) file whose
216 | default location depends on your OS:
217 |
218 | - Linux — `~/.config/demagnetize/config.toml` or `$XDG_CONFIG_HOME/demagnetize/config.toml`
219 | - macOS — `~/Library/Application Support/demagnetize/config.toml`
220 | - Windows — `%USERPROFILE%\AppData\Local\demagnetize\config.toml`
221 |
222 | This file may contain the following tables & keys, all of which are optional:
223 |
224 | - `[general]` — settings that don't fit anywhere more specific
225 |     - `batch-jobs` (positive integer; default 50) — the maximum number of
226 |       magnet links that the `batch` command will operate on at once
227 |     - `encrypt` — Configures when to use MSE/PE encryption when connecting to
228 |       peers and what to tell HTTP trackers about encryption support. The
229 |       possible options are:
230 |         - `"always"` — Always use encryption with peers, and include a
231 |           `requirecrypto=1` parameter in announcements to HTTP trackers
232 |         - `"prefer"` — Try creating an encrypted connection to a peer first; if
233 |           the encryption handshake fails, and the peer does not require
234 |           encryption, try again without encryption. Also include a
235 |           `supportcrypto=1` parameter in announcements to HTTP trackers.
236 |             - This is the default.
237 |             - Note that falling back to an unencrypted connection resets the
238 |               peer handshake timeout (see `peers.handshake-timeout` below).
239 |         - `"if-required"` — Only use encryption if the tracker that returned
240 |           the peer indicated that the peer requires encryption, and include a
241 |           `supportcrypto=1` parameter in announcements to HTTP trackers.
242 |         - `"never"` — Do not use encryption; do not attempt to connect to peers
243 |           that require encryption; do not include any crypto parameters in
244 |           announcements to HTTP trackers
245 |
246 | - `[peers]` — settings for interacting with peers
247 |     - `dh-exchange-timeout` (nonnegative integer; default 30) — When performing
248 |       the handshake for an encrypted peer connection, wait this many seconds
249 |       for the remote peer to send its portion of the Diffie-Hellman key
250 |       exchange.
251 |     - `handshake-timeout` (nonnegative integer; default 60) — When connecting
252 |       to a peer, if the TCP connection, encryption handshake, and BitTorrent
253 |       handshake are not all completed within this many seconds, the peer is
254 |       abandoned.
255 |     - `jobs-per-magnet` (positive integer; default 30) — the maximum number of
256 |       peers per magnet link that `demagnetize` will communicate with at once
257 |
258 | - `[trackers]` — settings for interacting with trackers
259 |     - `announce-timeout` (nonnegative integer; default 30) — When sending a
260 |       "started" announcement to a tracker & receiving a list of peers in
261 |       response, if the task does not complete within this many seconds, the
262 |       tracker is abandoned.
263 |     - `jobs-per-magnet` (positive integer; default 30) — the maximum number of
264 |       trackers per magnet link that `demagnetize` will communicate with at once
265 |     - `local-port` — the port number that `demagnetize` will tell trackers it's
266 |       receiving peer connections on
267 |         - This can be either a port number or a string containing two port
268 |           numbers separated by a hyphen (in which case a port in the given
269 |           inclusive range will be chosen at random). The default is
270 |           `"1025-65535"`, which selects any nonprivileged port at random.
271 |         - Note that `demagnetize` does not actually use the port in question,
272 |           and no attempt is made to ensure the port is not already in use. On
273 |           the other hand, `demagnetize` sends a "stop" announcement to each
274 |           tracker immediately after receiving the list of peers, so hopefully
275 |           no other peers will see the port number.
276 |     - `numwant` (positive integer; default 50) — the number of peers to request
277 |       from each tracker
278 |     - `shutdown-timeout` (nonnegative integer; default 3) — At the end of
279 |       program operation, wait this many seconds for any outstanding "stopped"
280 |       announcements to complete; any tasks still running after the timeout are
281 |       forcibly cancelled.
282 |
--------------------------------------------------------------------------------
/build.rs:
--------------------------------------------------------------------------------
1 | use std::process::ExitCode;
2 | use thiserror::Error;
3 |
4 | fn main() -> ExitCode {
5 |     let (major, minor, patch) = match get_version_bits() {
6 |         Ok(bits) => bits,
7 |         Err(e) => {
8 |             eprintln!("{e}");
9 |             return ExitCode::FAILURE;
10 |         }
11 |     };
12 |     let mut prefix = String::from("-DM-");
13 |     prefix.push_str(&major);
14 |     if minor.len() < 2 {
15 |         prefix.push('0');
16 |     }
17 |     prefix.push_str(&minor);
18 |     prefix.push_str(&patch);
19 |     prefix.push('-');
20 |     println!("cargo::rustc-env=PEER_ID_PREFIX={prefix}");
21 |     ExitCode::SUCCESS
22 | }
23 |
24 | fn get_version_bits() -> Result<(String, String, String), GetEnvError> {
25 |     let major = getenv("CARGO_PKG_VERSION_MAJOR")?;
26 |     let minor = getenv("CARGO_PKG_VERSION_MINOR")?;
27 |     let patch = getenv("CARGO_PKG_VERSION_PATCH")?;
28 |     Ok((major, minor, patch))
29 | }
30 |
31 | #[derive(Clone, Debug, Eq, Error, PartialEq)]
32 | #[error("{varname} envvar not set: {source}")]
33 | struct GetEnvError {
34 |     varname: &'static str,
35 |     source: std::env::VarError,
36 | }
37 |
38 | fn getenv(varname: &'static str) -> Result<String, GetEnvError> {
39 |     std::env::var(varname).map_err(|source| GetEnvError { varname, source })
40 | }
41 |
--------------------------------------------------------------------------------
/clippy.toml:
--------------------------------------------------------------------------------
1 | allow-unwrap-in-tests = true
2 | doc-valid-idents = ["BitTorrent", ".."]
3 | check-incompatible-msrv-in-tests = true
4 |
--------------------------------------------------------------------------------
/deny.toml:
--------------------------------------------------------------------------------
1 | [graph]
2 | targets = []
3 | all-features = true
4 | no-default-features = false
5 |
6 | [advisories]
7 | version = 2
8 | yanked = "deny"
9 | ignore = [
10 |     # Advisories stemming from bendy's use of "failure" (can't do anything
11 |     # about that):
12 |     "RUSTSEC-2019-0036",
13 |     "RUSTSEC-2020-0036",
14 | ]
15 |
16 | [bans]
17 | multiple-versions = "allow"
18 | wildcards = "allow"
19 |
20 | [licenses]
21 | version = 2
22 | allow = [
23 |     "Apache-2.0",
24 |     "BSD-2-Clause",
25 |     "BSD-3-Clause",
26 |     "CDLA-Permissive-2.0",
27 |     "ISC",
28 |     "MIT",
29 |     "MPL-2.0",
30 |     "OpenSSL",
31 |     "Unicode-3.0",
32 |     "Unicode-DFS-2016",
33 |     "Unlicense",
34 | ]
35 | unused-allowed-license = "allow"
36 |
37 | [[licenses.clarify]]
38 | name = "ring"
39 | expression = "ISC AND OpenSSL AND MIT"
40 | license-files = [
41 |     { path = "LICENSE", hash = 0xbd0eed23 }
42 | ]
43 |
44 | [sources]
45 | unknown-git = "deny"
46 | unknown-registry = "deny"
47 |
--------------------------------------------------------------------------------
/dist-workspace.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = ["cargo:."]
3 |
4 | [dist]
5 | cargo-dist-version = "0.28.0"
6 | ci = "github"
7 | default-features = false
8 | features = ["rustls"]
9 | include = ["THIRDPARTY.toml"]
10 | install-path = "CARGO_HOME"
11 | install-updater = false
12 | installers = ["shell", "powershell"]
13 | targets = [
14 |     "aarch64-apple-darwin",
15 |     "aarch64-unknown-linux-gnu",
16 |     "x86_64-apple-darwin",
17 |     "x86_64-pc-windows-msvc",
18 |     "x86_64-unknown-linux-gnu",
19 | ]
20 |
21 | [dist.github-custom-runners]
22 | global = "ubuntu-22.04"
23 |
24 | [dist.github-custom-runners.aarch64-unknown-linux-gnu]
25 | runner = "ubuntu-22.04"
26 |
27 | [dist.github-custom-runners.x86_64-unknown-linux-gnu]
28 | runner = "ubuntu-22.04"
29 |
--------------------------------------------------------------------------------
/src/app.rs:
--------------------------------------------------------------------------------
1 | use crate::asyncutil::ShutdownGroup;
2 | use crate::config::Config;
3 | use crate::consts::PEER_ID_PREFIX;
4 | use crate::peer::CryptoMode;
5 | use crate::types::{Key, PeerId};
6 | use rand::Rng;
7 | use std::fmt;
8 |
9 | #[derive(Debug)]
10 | pub(crate) struct App {
11 |     pub(crate) cfg: Config,
12 |     pub(crate) local: LocalPeer,
13 |     pub(crate) shutdown_group: ShutdownGroup,
14 | }
15 |
16 | impl App {
17 |     pub(crate) fn new<R: Rng>(cfg: Config, mut rng: R) -> App {
18 |         let id = PeerId::generate(PEER_ID_PREFIX, &mut rng);
19 |         let key = rng.random::<Key>();
20 |         let port = cfg.trackers.local_port.generate(&mut rng);
21 |         let local = LocalPeer { id, key, port };
22 |         App {
23 |             cfg,
24 |             local,
25 |             shutdown_group: ShutdownGroup::new(),
26 |         }
27 |     }
28 |
29 |     pub(crate) fn get_crypto_mode(&self, requires_crypto: bool) -> Option<CryptoMode> {
30 |         self.cfg.general.encrypt.get_crypto_mode(requires_crypto)
31 |     }
32 |
33 |     pub(crate) async fn shutdown(&self) {
34 |         self.shutdown_group
35 |             .shutdown(self.cfg.trackers.shutdown_timeout)
36 |             .await;
37 |     }
38 | }
39 |
40 | #[derive(Copy, Clone, Debug, Eq, PartialEq)]
41 | pub(crate) struct LocalPeer {
42 |     pub(crate) id: PeerId,
43 |     pub(crate) key: Key,
44 |     pub(crate) port: u16,
45 | }
46 |
47 | impl fmt::Display for LocalPeer {
48 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
49 |         write!(
50 |             f,
51 |             "id = {}, key = {}, port = {}",
52 |             self.id, self.key, self.port
53 |         )
54 |     }
55 | }
56 |
--------------------------------------------------------------------------------
/src/asyncutil/mod.rs:
--------------------------------------------------------------------------------
1 | mod shutdown_group;
2 | mod unique_stream;
3 | mod worker_nursery;
4 | pub(crate) use self::shutdown_group::*;
5 | pub(crate) use self::unique_stream::*;
6 | pub(crate) use self::worker_nursery::*;
7 |
--------------------------------------------------------------------------------
/src/asyncutil/shutdown_group.rs:
--------------------------------------------------------------------------------
1 | use std::future::Future;
2 | use std::time::Duration;
3 | use tokio::time::timeout;
4 | use tokio_util::{sync::CancellationToken, task::TaskTracker};
5 |
6 | #[derive(Debug, Default)]
7 | pub(crate) struct ShutdownGroup {
8 |     tracker: TaskTracker,
9 |     token: CancellationToken,
10 | }
11 |
12 | impl ShutdownGroup {
13 |     pub(crate) fn new() -> Self {
14 |         ShutdownGroup {
15 |             tracker: TaskTracker::new(),
16 |             token: CancellationToken::new(),
17 |         }
18 |     }
19 |
20 |     pub(crate) fn spawn<F, Fut>(&self, func: F)
21 |     where
22 |         F: FnOnce(CancellationToken) -> Fut,
23 |         Fut: Future + Send + 'static,
24 |         Fut::Output: Send + 'static,
25 |     {
26 |         let future = func(self.token.clone());
27 |         self.tracker.spawn(future);
28 |     }
29 |
30 |     async fn join(&self) {
31 |         self.tracker.close();
32 |         self.tracker.wait().await;
33 |     }
34 |
35 |     pub(crate) async fn shutdown(&self, duration: Duration) {
36 |         if timeout(duration, self.join()).await.is_err() {
37 |             self.token.cancel();
38 |         }
39 |         self.join().await;
40 |     }
41 | }
42 |
43 | #[cfg(test)]
44 | mod tests {
45 |     use super::*;
46 |     use std::sync::atomic::{AtomicBool, Ordering};
47 |     use std::sync::Arc;
48 |
49 |     #[tokio::test]
50 |     async fn test_shutdown_group() {
51 |         let group = ShutdownGroup::new();
52 |         let task1_finished = Arc::new(AtomicBool::new(false));
53 |         let my_finished = task1_finished.clone();
54 |         group.spawn(|token| async move {
55 |             tokio::select! {
56 |                 () = token.cancelled() => (),
57 |                 () = futures_util::future::ready(()) => my_finished.store(true, Ordering::Release),
58 |             }
59 |         });
60 |         let task2_cancelled = Arc::new(AtomicBool::new(false));
61 |         let my_cancelled = task2_cancelled.clone();
62 |         group.spawn(|token| async move {
63 |             tokio::select! {
64 |                 () = token.cancelled() => my_cancelled.store(true, Ordering::Release),
65 |                 () = tokio::time::sleep(Duration::from_secs(10)) => (),
66 |             }
67 |         });
68 |         group.shutdown(Duration::from_secs(1)).await;
69 |         assert!(task1_finished.load(Ordering::Acquire));
70 |         assert!(task2_cancelled.load(Ordering::Acquire));
71 |     }
72 | }
73 |
--------------------------------------------------------------------------------
/src/asyncutil/unique_stream.rs:
--------------------------------------------------------------------------------
1 | use futures_util::Stream;
2 | use pin_project_lite::pin_project;
3 | use std::collections::HashSet;
4 | use std::hash::Hash;
5 | use std::pin::Pin;
6 | use std::task::{ready, Context, Poll};
7 |
8 | pub(crate) trait UniqueByExt: Stream {
9 |     fn unique_by<F, K>(self, keyfunc: F) -> UniqueByStream<Self, F, K>
10 |     where
11 |         Self: Sized,
12 |         F: Fn(&Self::Item) -> K,
13 |         K: Eq + Hash,
14 |     {
15 |         UniqueByStream::new(self, keyfunc)
16 |     }
17 | }
18 |
19 | impl<S: Stream> UniqueByExt for S {}
20 |
21 | pin_project! {
22 |     #[derive(Clone, Debug)]
23 |     #[must_use = "streams do nothing unless polled"]
24 |     pub(crate) struct UniqueByStream<S, F, K> {
25 |         #[pin]
26 |         inner: S,
27 |         keyfunc: F,
28 |         seen: HashSet<K>,
29 |     }
30 | }
31 |
32 | impl<S, F, K> UniqueByStream<S, F, K> {
33 |     fn new(inner: S, keyfunc: F) -> Self {
34 |         UniqueByStream {
35 |             inner,
36 |             keyfunc,
37 |             seen: HashSet::new(),
38 |         }
39 |     }
40 | }
41 |
42 | impl<S, F, K> Stream for UniqueByStream<S, F, K>
43 | where
44 |     S: Stream,
45 |     F: Fn(&S::Item) -> K,
46 |     K: Eq + Hash,
47 | {
48 |     type Item = S::Item;
49 |
50 |     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
51 |         let mut this = self.project();
52 |         loop {
53 |             match ready!(this.inner.as_mut().poll_next(cx)) {
54 |                 Some(value) => {
55 |                     if this.seen.insert((this.keyfunc)(&value)) {
56 |                         return Some(value).into();
57 |                     }
58 |                 }
59 |                 None => return None.into(),
60 |             }
61 |         }
62 |     }
63 | }
64 |
65 | #[cfg(test)]
66 | mod tests {
67 |     use super::*;
68 |     use futures_util::stream::{iter, StreamExt};
69 |
70 |     #[tokio::test]
71 |     async fn test_unique_by_stream() {
72 |         // 10 = 0b1010 = 2
73 |         // 20 = 0b10100 = 2
74 |         // 30 = 0b11110 = 4
75 |         let stream = iter([4u32, 10, 20, 30, 8]).unique_by(|i| i.count_ones());
76 |         assert_eq!(stream.collect::<Vec<_>>().await, vec![4, 10, 30]);
77 |     }
78 | }
79 |
--------------------------------------------------------------------------------
/src/asyncutil/worker_nursery.rs:
--------------------------------------------------------------------------------
1 | #![allow(unused)]
2 | use futures_util::{future::BoxFuture, FutureExt, Stream};
3 | use pin_project_lite::pin_project;
4 | use std::fmt;
5 | use std::future::Future;
6 | use std::num::NonZeroUsize;
7 | use std::pin::Pin;
8 | use std::sync::{
9 |     atomic::{AtomicBool, Ordering},
10 |     Arc,
11 | };
12 | use std::task::{ready, Context, Poll};
13 | use tokio::{sync::mpsc, task::JoinSet};
14 |
15 | type UnwindResult<T> = Result<T, Box<dyn std::any::Any + Send>>;
16 |
17 | /// A handle for executing futures in a task group/nursery that uses a fixed
18 | /// number of worker tasks to await the futures.
19 | ///
20 | /// `WorkerNursery` is cloneable and sendable, and so it can be used to spawn
21 | /// futures from inside other tasks in the nursery. The nursery returned by
22 | /// [`WorkerNursery::new()`] and all clones thereof must be dropped before the
23 | /// corresponding [`WorkerNurseryStream`] can yield `None`.
24 | #[derive(Debug)]
25 | pub(crate) struct WorkerNursery<T> {
26 |     inner: async_channel::Sender<BoxFuture<'static, T>>,
27 |     done: Arc<AtomicBool>,
28 | }
29 |
30 | impl<T: Send + 'static> WorkerNursery<T> {
31 |     /// Create a new nursery with `workers` worker tasks and return a handle
32 |     /// for spawning futures and a [`Stream`] of future return values. `T` is
33 |     /// the `Output` type of the futures that will be spawned in the nursery.
34 |     pub(crate) fn new(workers: NonZeroUsize) -> (WorkerNursery<T>, WorkerNurseryStream<T>) {
35 |         let (input_sender, input_receiver) = async_channel::unbounded::<BoxFuture<'static, T>>();
36 |         let (output_sender, output_receiver) = mpsc::unbounded_channel();
37 |         let mut tasks = JoinSet::new();
38 |         let done = Arc::new(AtomicBool::new(false));
39 |         for _ in 0..workers.get() {
40 |             tasks.spawn({
41 |                 let input = input_receiver.clone();
42 |                 let output = output_sender.clone();
43 |                 let done = done.clone();
44 |                 async move {
45 |                     while let Ok(fut) = input.recv().await {
46 |                         if done.load(Ordering::SeqCst) {
47 |                             break;
48 |                         }
49 |                         let r = std::panic::AssertUnwindSafe(fut).catch_unwind().await;
50 |                         if output.send(r).is_err() {
51 |                             break;
52 |                         }
53 |                     }
54 |                 }
55 |             });
56 |         }
57 |         (
58 |             WorkerNursery {
59 |                 inner: input_sender,
60 |                 done: done.clone(),
61 |             },
62 |             WorkerNurseryStream {
63 |                 inner: output_receiver,
64 |                 closer: Closer(input_receiver),
65 |                 done,
66 |                 _tasks: tasks,
67 |             },
68 |         )
69 |     }
70 | }
71 | 
72 | impl<T> WorkerNursery<T> {
73 |     /// Spawn a future that returns `T` in the nursery. Errors if the nursery
74 |     /// is closed.
75 |     pub(crate) fn spawn<Fut>(&self, fut: Fut) -> Result<(), SpawnError>
76 |     where
77 |         Fut: Future<Output = T> + Send + 'static,
78 |     {
79 |         self.inner.try_send(fut.boxed()).map_err(|_| SpawnError)
80 |     }
81 | 
82 |     /// Closes the nursery. Any further calls to [`spawn()`](Self::spawn) will
83 |     /// return an error.
84 |     ///
85 |     /// Returns `true` if this call has closed the nursery and it was not
86 |     /// closed already.
87 |     ///
88 |     /// Any pending futures will still be processed after calling `close()`.
89 |     pub(crate) fn close(&self) -> bool {
90 |         self.inner.close()
91 |     }
92 | 
93 |     /// Returns `true` if the nursery is closed.
94 |     pub(crate) fn is_closed(&self) -> bool {
95 |         self.inner.is_closed()
96 |     }
97 | 
98 |     /// Calls [`close()`](Self::close) and additionally instructs the workers
99 |     /// to not process any pending futures. Any futures currently being
100 |     /// processed are still processed to completion.
101 |     ///
102 |     /// Returns `true` if this call has shut down the nursery and it was not
103 |     /// shut down already.
104 |     pub(crate) fn shutdown(&self) -> bool {
105 |         self.close();
106 |         !self.done.swap(true, Ordering::SeqCst)
107 |     }
108 | 
109 |     /// Returns `true` if the nursery is shut down.
110 |     pub(crate) fn is_shutdown(&self) -> bool {
111 |         self.done.load(Ordering::SeqCst)
112 |     }
113 | 
114 |     /// Returns `true` if the nursery's input channel is empty.
115 |     pub(crate) fn is_empty(&self) -> bool {
116 |         self.inner.is_empty()
117 |     }
118 | 
119 |     /// Returns the number of pending futures in the nursery's input channel.
120 |     pub(crate) fn len(&self) -> usize {
121 |         self.inner.len()
122 |     }
123 | }
124 | 
125 | // Clone can't be derived, as that would erroneously add `T: Clone` bounds to
126 | // the impl.
127 | impl<T> Clone for WorkerNursery<T> {
128 |     fn clone(&self) -> WorkerNursery<T> {
129 |         WorkerNursery {
130 |             inner: self.inner.clone(),
131 |             done: self.done.clone(),
132 |         }
133 |     }
134 | }
135 | 
136 | // pin_project! lets us call poll_recv() in poll_next() without even calling
137 | // project(). Not sure how.
138 | pin_project! {
139 |     /// A [`Stream`] of the values returned by the tasks spawned in a worker
140 |     /// nursery.
141 |     ///
142 |     /// The corresponding [`WorkerNursery`] and all clones thereof must be
143 |     /// dropped before the stream can yield `None`.
144 |     ///
145 |     /// When a `WorkerNurseryStream` is dropped, all tasks in the nursery are
146 |     /// aborted, and the nursery is closed.
147 |     #[derive(Debug)]
148 |     pub(crate) struct WorkerNurseryStream<T> {
149 |         inner: mpsc::UnboundedReceiver<UnwindResult<T>>,
150 |         closer: Closer<T>,
151 |         done: Arc<AtomicBool>,
152 |         _tasks: JoinSet<()>,
153 |     }
154 | }
155 | 
156 | impl<T> WorkerNurseryStream<T> {
157 |     /// Receives the output from the next input future to complete execution.
158 |     /// Returns `None` if all input futures have been executed and the nursery
159 |     /// is closed.
160 |     ///
161 |     /// # Panics
162 |     ///
163 |     /// If the stream receives a result from a future that panicked, this
164 |     /// method resumes unwinding the panic.
165 |     pub(crate) async fn recv(&mut self) -> Option<T> {
166 |         match self.inner.recv().await? {
167 |             Ok(r) => Some(r),
168 |             Err(e) => std::panic::resume_unwind(e),
169 |         }
170 |     }
171 | 
172 |     /// Tries to receive the next result for this stream.
173 |     ///
174 |     /// This method returns the [`Empty`] error if the stream is currently
175 |     /// empty but the nursery is still open.
176 |     ///
177 |     /// This method returns the [`Done`] error if the stream is currently empty
178 |     /// and the nursery is closed.
179 |     ///
180 |     /// Unlike the [`poll_recv`] method, this method will never return an
181 |     /// [`Empty`] error spuriously.
182 |     ///
183 |     /// [`Empty`]: crate::TryRecvError::Empty
184 |     /// [`Done`]: crate::TryRecvError::Done
185 |     /// [`poll_recv`]: Self::poll_recv
186 |     ///
187 |     /// # Panics
188 |     ///
189 |     /// If the stream receives a result from a future that panicked, this method
190 |     /// resumes unwinding the panic.
191 |     pub(crate) fn try_recv(&mut self) -> Result<T, TryRecvError> {
192 |         match self.inner.try_recv()? {
193 |             Ok(r) => Ok(r),
194 |             Err(e) => std::panic::resume_unwind(e),
195 |         }
196 |     }
197 | }
198 | 
199 | impl<T> WorkerNurseryStream<T> {
200 |     /// Closes the nursery. Any further calls to [`WorkerNursery::spawn()`]
201 |     /// will return an error.
202 |     ///
203 |     /// Returns `true` if this call has closed the nursery and it was not
204 |     /// closed already.
205 |     ///
206 |     /// Any pending futures will still be processed after calling `close()`.
207 |     pub(crate) fn close(&self) -> bool {
208 |         self.closer.close()
209 |     }
210 | 
211 |     /// Returns `true` if the nursery is closed, meaning either that `close()`
212 |     /// has been called on a [`WorkerNursery`] or the [`WorkerNurseryStream`],
213 |     /// or that all [`WorkerNursery`] clones have been dropped.
214 |     pub(crate) fn is_closed(&self) -> bool {
215 |         self.closer.is_closed()
216 |     }
217 | 
218 |     /// Calls [`close()`](Self::close) and additionally instructs the worker
219 |     /// tasks to not process any pending futures. Any futures currently being
220 |     /// processed are still processed to completion.
221 |     ///
222 |     /// Returns `true` if this call has shut down the nursery and it was not
223 |     /// shut down already.
224 |     pub(crate) fn shutdown(&self) -> bool {
225 |         self.close();
226 |         !self.done.swap(true, Ordering::SeqCst)
227 |     }
228 | 
229 |     /// Returns `true` if the nursery is shut down.
230 |     pub(crate) fn is_shutdown(&self) -> bool {
231 |         self.done.load(Ordering::SeqCst)
232 |     }
233 | 
234 |     /// Returns `true` if the output stream is empty.
235 |     pub(crate) fn is_empty(&self) -> bool {
236 |         self.inner.is_empty()
237 |     }
238 | 
239 |     /// Returns the number of pending outputs in the stream.
240 |     pub(crate) fn len(&self) -> usize {
241 |         self.inner.len()
242 |     }
243 | 
244 |     /// Polls to receive the next result from this stream.
245 |     ///
246 |     /// This method returns:
247 |     ///
248 |     /// * `Poll::Pending` if no results are available but the nursery is not
249 |     ///   closed, or if a spurious failure happens.
250 |     /// * `Poll::Ready(Some(message))` if a result is available.
251 |     /// * `Poll::Ready(None)` if the nursery has been closed and all results
252 |     ///   have been received.
253 |     ///
254 |     /// # Panics
255 |     ///
256 |     /// If the stream receives a result from a future that panicked, this
257 |     /// method resumes unwinding the panic.
258 |     pub(crate) fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
259 |         match ready!(self.inner.poll_recv(cx)) {
260 |             Some(Ok(r)) => Some(r).into(),
261 |             Some(Err(e)) => std::panic::resume_unwind(e),
262 |             None => None.into(),
263 |         }
264 |     }
265 | }
266 | 
267 | impl<T> Stream for WorkerNurseryStream<T> {
268 |     type Item = T;
269 | 
270 |     /// Poll for one of the worker tasks to finish processing an input future,
271 |     /// and return the output.
272 |     ///
273 |     /// # Panics
274 |     ///
275 |     /// If the stream receives a result from a future that panicked, this method
276 |     /// resumes unwinding the panic.
277 |     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
278 |         self.poll_recv(cx)
279 |     }
280 | }
281 | 
282 | // This type is needed because putting the Drop impl on WorkerNurseryStream
283 | // instead conflicts with pin_project_lite.
284 | #[derive(Debug)]
285 | struct Closer<T>(async_channel::Receiver<BoxFuture<'static, T>>);
286 | 
287 | impl<T> Closer<T> {
288 |     fn close(&self) -> bool {
289 |         self.0.close()
290 |     }
291 | 
292 |     fn is_closed(&self) -> bool {
293 |         self.0.is_closed()
294 |     }
295 | }
296 | 
297 | impl<T> Drop for Closer<T> {
298 |     fn drop(&mut self) {
299 |         self.close();
300 |     }
301 | }
302 | 
303 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
304 | pub(crate) struct SpawnError;
305 | 
306 | impl fmt::Display for SpawnError {
307 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
308 |         write!(f, "could not spawn future in nursery as it is closed")
309 |     }
310 | }
311 | 
312 | impl std::error::Error for SpawnError {}
313 | 
314 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
315 | pub(crate) enum TryRecvError {
316 |     Empty,
317 |     Done,
318 | }
319 | 
320 | impl fmt::Display for TryRecvError {
321 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
322 |         match self {
323 |             TryRecvError::Empty => write!(f, "nursery output stream is empty"),
324 |             TryRecvError::Done => write!(f, "nursery is done"),
325 |         }
326 |     }
327 | }
328 | 
329 | impl std::error::Error for TryRecvError {}
330 | 
331 | impl From<mpsc::error::TryRecvError> for TryRecvError {
332 |     fn from(e: mpsc::error::TryRecvError) -> TryRecvError {
333 |         match e {
334 |             mpsc::error::TryRecvError::Empty => TryRecvError::Empty,
335 |             mpsc::error::TryRecvError::Disconnected => TryRecvError::Done,
336 |         }
337 |     }
338 | }
339 | 
340 | #[cfg(test)]
341 | mod tests {
342 |     use super::*;
343 |     use futures_util::StreamExt;
344 |     use tokio::sync::oneshot;
345 | 
346 |     #[test]
347 |     fn nursery_is_send() {
348 |         #[allow(dead_code)]
349 |         fn require_send<T: Send>(_t: T) {}
350 | 
351 |         #[allow(dead_code)]
352 |         fn check_nursery_send() {
353 |             let (nursery, _) = WorkerNursery::<u32>::new(NonZeroUsize::new(42).unwrap());
354 |             require_send(nursery);
355 |         }
356 |     }
357 | 
358 |     #[tokio::test]
359 |     async fn collect() {
360 |         let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
361 |         for i in 0..20 {
362 |             nursery.spawn(std::future::ready(i)).unwrap();
363 |         }
364 |         assert!(!stream.is_closed());
365 |         drop(nursery);
366 |         assert!(stream.is_closed());
367 |         let mut values = stream.collect::<Vec<_>>().await;
368 |         values.sort_unstable();
369 |         assert_eq!(values, (0..20).collect::<Vec<_>>());
370 |     }
371 | 
372 |     #[tokio::test]
373 |     async fn reraise_panic_recv() {
374 |         let (nursery, mut stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
375 |         nursery
376 |             .spawn(async { panic!("I can't take this anymore!") })
377 |             .unwrap();
378 |         drop(nursery);
379 |         let r = std::panic::AssertUnwindSafe(stream.recv())
380 |             .catch_unwind()
381 |             .await;
382 |         assert!(r.is_err());
383 |     }
384 | 
385 |     #[tokio::test]
386 |     async fn reraise_panic_next() {
387 |         let (nursery, mut stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
388 |         nursery
389 |             .spawn(async { panic!("I can't take this anymore!") })
390 |             .unwrap();
391 |         drop(nursery);
392 |         let r = std::panic::AssertUnwindSafe(stream.next())
393 |             .catch_unwind()
394 |             .await;
395 |         assert!(r.is_err());
396 |     }
397 | 
398 |     #[tokio::test]
399 |     async fn close_receiver() {
400 |         let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
401 |         for i in 0..5 {
402 |             nursery.spawn(std::future::ready(i)).unwrap();
403 |         }
404 |         assert!(!nursery.is_shutdown());
405 |         assert!(!stream.is_shutdown());
406 |         assert!(!stream.is_closed());
407 |         assert!(!nursery.is_closed());
408 |         assert!(stream.close());
409 |         assert!(nursery.spawn(std::future::ready(5)).is_err());
410 |         assert!(!nursery.is_shutdown());
411 |         assert!(!stream.is_shutdown());
412 |         assert!(stream.is_closed());
413 |         assert!(nursery.is_closed());
414 |         drop(nursery);
415 |         let mut values = stream.collect::<Vec<_>>().await;
416 |         values.sort_unstable();
417 |         assert_eq!(values, (0..5).collect::<Vec<_>>());
418 |     }
419 | 
420 |     #[tokio::test]
421 |     async fn close_sender() {
422 |         let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
423 |         for i in 0..5 {
424 |             nursery.spawn(std::future::ready(i)).unwrap();
425 |         }
426 |         assert!(!nursery.is_shutdown());
427 |         assert!(!stream.is_shutdown());
428 |         assert!(!stream.is_closed());
429 |         assert!(!nursery.is_closed());
430 |         assert!(nursery.close());
431 |         assert!(nursery.spawn(std::future::ready(5)).is_err());
432 |         assert!(!nursery.is_shutdown());
433 |         assert!(!stream.is_shutdown());
434 |         assert!(stream.is_closed());
435 |         assert!(nursery.is_closed());
436 |         drop(nursery);
437 |         let mut values = stream.collect::<Vec<_>>().await;
438 |         values.sort_unstable();
439 |         assert_eq!(values, (0..5).collect::<Vec<_>>());
440 |     }
441 | 
442 |     #[tokio::test]
443 |     async fn close_on_shutdown() {
444 |         let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
445 |         for i in 0..5 {
446 |             nursery.spawn(std::future::ready(i + 1)).unwrap();
447 |         }
448 |         assert!(!nursery.is_shutdown());
449 |         assert!(!stream.is_shutdown());
450 |         assert!(!stream.is_closed());
451 |         assert!(!nursery.is_closed());
452 |         assert!(stream.shutdown());
453 |         assert!(nursery.spawn(std::future::ready(5)).is_err());
454 |         assert!(nursery.is_shutdown());
455 |         assert!(stream.is_shutdown());
456 |         assert!(stream.is_closed());
457 |         assert!(nursery.is_closed());
458 |         assert!(!nursery.shutdown());
459 |         assert!(!stream.shutdown());
460 |         assert!(nursery.is_shutdown());
461 |         assert!(stream.is_shutdown());
462 |         assert!(stream.is_closed());
463 |         assert!(nursery.is_closed());
464 |         drop(nursery);
465 |         // Note that, because shutdown() prevents queued tasks from running,
466 |         // the stream will nondeterministically return a subset of the
467 |         // incremented inputs.
468 |         assert!(stream.all(|n| async move { (1..6).contains(&n) }).await);
469 |     }
470 | 
471 |     #[tokio::test]
472 |     async fn dropping_receiver_closes_sender() {
473 |         let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
474 |         assert!(!nursery.is_closed());
475 |         drop(stream);
476 |         assert!(nursery.is_closed());
477 |         assert!(nursery.spawn(std::future::ready(5)).is_err());
478 |     }
479 | 
480 |     #[tokio::test]
481 |     async fn queued_run_after_close() {
482 |         let (nursery, mut stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
483 |         let mut txes = Vec::new();
484 |         for _ in 0..10 {
485 |             let (tx, rx) = oneshot::channel();
486 |             nursery.spawn(async move { rx.await.unwrap() }).unwrap();
487 |             txes.push(tx);
488 |         }
489 |         assert_eq!(stream.try_recv(), Err(TryRecvError::Empty));
490 |         nursery.close();
491 |         for (i, tx) in txes.into_iter().enumerate() {
492 |             tx.send(i).unwrap();
493 |         }
494 |         let mut values = stream.collect::<Vec<_>>().await;
495 |         values.sort_unstable();
496 |         assert_eq!(values, (0..10).collect::<Vec<_>>());
497 |     }
498 | 
499 |     #[tokio::test]
500 |     async fn queued_not_run_after_recv_shutdown() {
501 |         let (nursery, mut stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
502 |         let mut txes = Vec::new();
503 |         for _ in 0..10 {
504 |             let (tx, rx) = oneshot::channel();
505 |             nursery.spawn(async move { rx.await.unwrap() }).unwrap();
506 |             txes.push(tx);
507 |         }
508 |         // Give the workers a chance to pick up the first five futures:
509 |         tokio::task::yield_now().await;
510 |         assert_eq!(stream.try_recv(), Err(TryRecvError::Empty));
511 |         stream.shutdown();
512 |         for (i, tx) in txes.into_iter().enumerate() {
513 |             let _ = tx.send(i);
514 |         }
515 |         let mut values = stream.collect::<Vec<_>>().await;
516 |         values.sort_unstable();
517 |         assert_eq!(values, (0..5).collect::<Vec<_>>());
518 |     }
519 | 
520 |     #[tokio::test]
521 |     async fn queued_not_run_after_send_shutdown() {
522 |         let (nursery, mut stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
523 |         let mut txes = Vec::new();
524 |         for _ in 0..10 {
525 |             let (tx, rx) = oneshot::channel();
526 |             nursery.spawn(async move { rx.await.unwrap() }).unwrap();
527 |             txes.push(tx);
528 |         }
529 |         // Give the workers a chance to pick up the first five futures:
530 |         tokio::task::yield_now().await;
531 |         assert_eq!(stream.try_recv(), Err(TryRecvError::Empty));
532 |         nursery.shutdown();
533 |         for (i, tx) in txes.into_iter().enumerate() {
534 |             let _ = tx.send(i);
535 |         }
536 |         let mut values = stream.collect::<Vec<_>>().await;
537 |         values.sort_unstable();
538 |         assert_eq!(values, (0..5).collect::<Vec<_>>());
539 |     }
540 | 
541 |     #[tokio::test]
542 |     async fn nested_spawn() {
543 |         let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
544 |         let inner = nursery.clone();
545 |         nursery
546 |             .spawn(async move {
547 |                 inner.spawn(std::future::ready(0)).unwrap();
548 |                 std::future::ready(1).await
549 |             })
550 |             .unwrap();
551 |         nursery.spawn(std::future::ready(2)).unwrap();
552 |         nursery.spawn(std::future::ready(3)).unwrap();
553 |         drop(nursery);
554 |         let mut values = stream.collect::<Vec<_>>().await;
555 |         values.sort_unstable();
556 |         assert_eq!(values, vec![0, 1, 2, 3]);
557 |     }
558 | 
559 |     #[tokio::test]
560 |     async fn no_close_until_drop() {
561 |         let (nursery, mut nursery_stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
562 |         nursery.spawn(std::future::ready(1)).unwrap();
563 |         nursery.spawn(std::future::ready(2)).unwrap();
564 |         nursery.spawn(std::future::ready(3)).unwrap();
565 |         let mut values = Vec::new();
566 |         values.push(nursery_stream.next().await.unwrap());
567 |         values.push(nursery_stream.next().await.unwrap());
568 |         values.push(nursery_stream.next().await.unwrap());
569 |         values.sort_unstable();
570 |         assert_eq!(values, vec![1, 2, 3]);
571 |         assert_eq!(nursery_stream.try_recv(), Err(TryRecvError::Empty));
572 |         drop(nursery);
573 |         //assert_eq!(nursery_stream.try_recv(), Err(TryRecvError::Done));
574 |         let r = tokio::time::timeout(std::time::Duration::from_millis(100), nursery_stream.next())
575 |             .await;
576 |         assert_eq!(r, Ok(None));
577 |     }
578 | 
579 |     #[tokio::test]
580 |     async fn drop_tasks_on_drop_stream() {
581 |         enum Void {}
582 | 
583 |         let (nursery, nursery_stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
584 |         let (sender, receiver) = oneshot::channel::<Void>();
585 |         nursery
586 |             .spawn({
587 |                 async move {
588 |                     std::future::pending::<()>().await;
589 |                     drop(sender);
590 |                 }
591 |             })
592 |             .unwrap();
593 |         drop(nursery);
594 |         drop(nursery_stream);
595 |         assert!(receiver.await.is_err());
596 |     }
597 | 
598 |     #[tokio::test]
599 |     async fn nest_nurseries() {
600 |         let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
601 |         nursery
602 |             .spawn(async {
603 |                 let (nursery, stream) = WorkerNursery::new(NonZeroUsize::new(5).unwrap());
604 |                 nursery.spawn(std::future::ready(1)).unwrap();
605 |                 nursery.spawn(std::future::ready(2)).unwrap();
606 |                 nursery.spawn(std::future::ready(3)).unwrap();
607 |                 drop(nursery);
608 |                 stream.fold(0, |accum, i| async move { accum + i }).await
609 |             })
610 |             .unwrap();
611 |         nursery.spawn(std::future::ready(4)).unwrap();
612 |         nursery.spawn(std::future::ready(5)).unwrap();
613 |         drop(nursery);
614 |         let mut values = stream.collect::<Vec<_>>().await;
615 |         values.sort_unstable();
616 |         assert_eq!(values, vec![4, 5, 6]);
617 |     }
618 | }
619 | 
--------------------------------------------------------------------------------
/src/config.rs:
--------------------------------------------------------------------------------
1 | use crate::peer::CryptoMode;
2 | use crate::tracker::TrackerCrypto;
3 | use rand::Rng;
4 | use serde::{
5 |     de::{Deserializer, Unexpected},
6 |     Deserialize,
7 | };
8 | use std::fmt;
9 | use std::num::{NonZeroU32, NonZeroUsize};
10 | use std::path::{Path, PathBuf};
11 | use std::time::Duration;
12 | use thiserror::Error;
13 | 
14 | #[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
15 | pub(crate) struct Config {
16 |     #[serde(default)]
17 |     pub(crate) general: GeneralConfig,
18 |     #[serde(default)]
19 |     pub(crate) trackers: TrackersConfig,
20 |     #[serde(default)]
21 |     pub(crate) peers: PeersConfig,
22 | }
23 | 
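// A sample configuration file (not from the original source; assembled from
// the keys exercised by the `full_config` test below — every table and key is
// optional):
//
//     [general]
//     batch-jobs = 100
//     encrypt = "if-required"
//
//     [trackers]
//     announce-timeout = 45
//     jobs-per-magnet = 42
//     local-port = "10000-65535"
//     numwant = 75
//     shutdown-timeout = 5
//
//     [peers]
//     dh-exchange-timeout = 10
//     handshake-timeout = 120
//     jobs-per-magnet = 23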
24 | impl Config {
25 |     // Returns `None` if $HOME cannot be determined
26 |     pub(crate) fn default_path() -> Option<PathBuf> {
27 |         Some(
28 |             dirs::config_local_dir()?
29 |                 .join("demagnetize")
30 |                 .join("config.toml"),
31 |         )
32 |     }
33 | 
34 |     pub(crate) fn load<P: AsRef<Path>>(path: P) -> Result<Config, ConfigError> {
35 |         let content = std::fs::read_to_string(path)?;
36 |         toml::from_str(&content).map_err(Into::into)
37 |     }
38 | }
39 | 
40 | #[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
41 | #[serde(rename_all = "kebab-case")]
42 | pub(crate) struct GeneralConfig {
43 |     /// Maximum number of magnet links to operate on at once in batch mode
44 |     #[serde(default = "default_batch_jobs")]
45 |     pub(crate) batch_jobs: NonZeroUsize,
46 | 
47 |     #[serde(default)]
48 |     pub(crate) encrypt: CryptoPreference,
49 | }
50 | 
51 | impl Default for GeneralConfig {
52 |     fn default() -> GeneralConfig {
53 |         GeneralConfig {
54 |             batch_jobs: default_batch_jobs(),
55 |             encrypt: CryptoPreference::default(),
56 |         }
57 |     }
58 | }
59 | 
60 | #[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
61 | #[serde(rename_all = "kebab-case")]
62 | pub(crate) struct TrackersConfig {
63 |     #[serde(default)]
64 |     pub(crate) local_port: LocalPort,
65 | 
66 |     /// Number of peers to request per tracker
67 |     #[serde(default = "default_numwant")]
68 |     pub(crate) numwant: NonZeroU32,
69 | 
70 |     /// Maximum number of trackers per magnet link to communicate with at once
71 |     #[serde(default = "default_tracker_jobs_per_magnet")]
72 |     pub(crate) jobs_per_magnet: NonZeroUsize,
73 | 
74 |     /// Overall timeout for interacting with a tracker
75 |     #[serde(
76 |         default = "default_announce_timeout",
77 |         deserialize_with = "deserialize_seconds"
78 |     )]
79 |     pub(crate) announce_timeout: Duration,
80 | 
81 |     /// Timeout for sending & receiving a "stopped" announcement to a tracker
82 |     #[serde(
83 |         default = "default_shutdown_timeout",
84 |         deserialize_with = "deserialize_seconds"
85 |     )]
86 |     pub(crate) shutdown_timeout: Duration,
87 | }
88 | 
89 | impl Default for TrackersConfig {
90 |     fn default() -> TrackersConfig {
91 |         TrackersConfig {
92 |             local_port: LocalPort::default(),
93 |             numwant: default_numwant(),
94 |             jobs_per_magnet: default_tracker_jobs_per_magnet(),
95 |             announce_timeout: default_announce_timeout(),
96 |             shutdown_timeout: default_shutdown_timeout(),
97 |         }
98 |     }
99 | }
100 | 
101 | #[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
102 | #[serde(rename_all = "kebab-case")]
103 | pub(crate) struct PeersConfig {
104 |     /// Maximum number of peers per magnet link to interact with at once
105 |     #[serde(default = "default_peer_jobs_per_magnet")]
106 |     pub(crate) jobs_per_magnet: NonZeroUsize,
107 | 
108 |     /// Timeout for connecting to a peer and performing the BitTorrent
109 |     /// handshake and extended handshake
110 |     #[serde(
111 |         default = "default_handshake_timeout",
112 |         deserialize_with = "deserialize_seconds"
113 |     )]
114 |     pub(crate) handshake_timeout: Duration,
115 | 
116 |     /// Timeout for receiving packet 2 from server during encryption handshake
117 |     #[serde(
118 |         default = "default_dh_exchange_timeout",
119 |         deserialize_with = "deserialize_seconds"
120 |     )]
121 |     pub(crate) dh_exchange_timeout: Duration,
122 | }
123 | 
124 | impl Default for PeersConfig {
125 |     fn default() -> PeersConfig {
126 |         PeersConfig {
127 |             jobs_per_magnet: default_peer_jobs_per_magnet(),
128 |             handshake_timeout: default_handshake_timeout(),
129 |             dh_exchange_timeout: default_dh_exchange_timeout(),
130 |         }
131 |     }
132 | }
133 | 
134 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
135 | pub(crate) enum LocalPort {
136 |     Constant(u16),
137 |     Range { low: u16, high: u16 },
138 | }
139 | 
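// The accepted TOML forms for `local-port`, as exercised by the tests below
// (a bare integer, a stringified integer, or an ascending "LOW-HIGH" range):
//
//     local-port = 60069
//     local-port = "60069"
//     local-port = "3000-4000"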
140 | impl LocalPort {
141 |     pub(crate) fn generate<R: Rng>(&self, mut rng: R) -> u16 {
142 |         match *self {
143 |             LocalPort::Constant(p) => p,
144 |             LocalPort::Range { low, high } => rng.random_range(low..=high),
145 |         }
146 |     }
147 | }
148 | 
149 | impl Default for LocalPort {
150 |     fn default() -> LocalPort {
151 |         LocalPort::Range {
152 |             low: 1025,
153 |             high: 65535,
154 |         }
155 |     }
156 | }
157 | 
158 | macro_rules! try_visit_int {
159 |     ($($t:ty, $visit:ident),* $(,)?) => {
160 |         $(
161 |             fn $visit<E>(self, p: $t) -> Result<Self::Value, E>
162 |             where
163 |                 E: serde::de::Error,
164 |             {
165 |                 u16::try_from(p).map(LocalPort::Constant).map_err(|_| {
166 |                     E::invalid_value(
167 |                         Unexpected::Signed(p.into()),
168 |                         &"port number out of range; must be from 0 to 65535",
169 |                     )
170 |                 })
171 |             }
172 |         )*
173 |     }
174 | }
175 | 
176 | macro_rules! try_visit_uint {
177 |     ($($t:ty, $visit:ident),* $(,)?) => {
178 |         $(
179 |             fn $visit<E>(self, p: $t) -> Result<Self::Value, E>
180 |             where
181 |                 E: serde::de::Error,
182 |             {
183 |                 u16::try_from(p).map(LocalPort::Constant).map_err(|_| {
184 |                     E::invalid_value(
185 |                         Unexpected::Unsigned(p.into()),
186 |                         &"port number out of range; must be from 0 to 65535",
187 |                     )
188 |                 })
189 |             }
190 |         )*
191 |     }
192 | }
193 | 
194 | impl<'de> Deserialize<'de> for LocalPort {
195 |     fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<LocalPort, D::Error> {
196 |         struct Visitor;
197 | 
198 |         impl serde::de::Visitor<'_> for Visitor {
199 |             type Value = LocalPort;
200 | 
201 |             fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
202 |                 formatter.write_str(
203 |                     "either a single port number or two ascending port numbers separated by a hyphen",
204 |                 )
205 |             }
206 | 
207 |             fn visit_u8<E>(self, p: u8) -> Result<Self::Value, E>
208 |             where
209 |                 E: serde::de::Error,
210 |             {
211 |                 Ok(LocalPort::Constant(u16::from(p)))
212 |             }
213 | 
214 |             fn visit_u16<E>(self, p: u16) -> Result<Self::Value, E>
215 |             where
216 |                 E: serde::de::Error,
217 |             {
218 |                 Ok(LocalPort::Constant(p))
219 |             }
220 | 
221 |             try_visit_int!(i8, visit_i8, i16, visit_i16, i32, visit_i32, i64, visit_i64);
222 |             try_visit_uint!(u32, visit_u32, u64, visit_u64);
223 | 
224 |             fn visit_str<E>(self, input: &str) -> Result<Self::Value, E>
225 |             where
226 |                 E: serde::de::Error,
227 |             {
228 |                 if input.chars().all(|c| c.is_ascii_digit()) {
229 |                     let Ok(p) = input.parse::<u16>() else {
230 |                         return Err(E::invalid_value(Unexpected::Str(input), &self));
231 |                     };
232 |                     Ok(LocalPort::Constant(p))
233 |                 } else {
234 |                     let Some((pre, post)) = input.split_once('-') else {
235 |                         return Err(E::invalid_value(Unexpected::Str(input), &self));
236 |                     };
237 |                     let low = pre.trim().parse::<u16>().ok();
238 |                     let high = post.trim().parse::<u16>().ok();
239 |                     let Some((low, high)) = low.zip(high).filter(|(l, h)| l <= h) else {
240 |                         return Err(E::invalid_value(Unexpected::Str(input), &self));
241 |                     };
242 |                     Ok(LocalPort::Range { low, high })
243 |                 }
244 |             }
245 |         }
246 | 
247 |         deserializer.deserialize_str(Visitor)
248 |     }
249 | }
250 | 
251 | #[derive(Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq)]
252 | #[serde(rename_all = "kebab-case")]
253 | pub(crate) enum CryptoPreference {
254 |     Always,
255 |     #[default]
256 |     Prefer,
257 |     IfRequired,
258 |     Never,
259 | }
260 | 
261 | impl CryptoPreference {
262 |     pub(crate) fn get_tracker_crypto(&self) -> TrackerCrypto {
263 |         match self {
264 |             CryptoPreference::Always => TrackerCrypto::Required,
265 |             CryptoPreference::Never => TrackerCrypto::Plain,
266 |             _ => TrackerCrypto::Supported,
267 |         }
268 |     }
269 | 
270 |     pub(crate) fn get_crypto_mode(&self, requires_crypto: bool) -> Option<CryptoMode> {
271 |         match (self, requires_crypto) {
272 |             (CryptoPreference::Always, _) => Some(CryptoMode::Encrypt),
273 |             (CryptoPreference::Prefer, true) => Some(CryptoMode::Encrypt),
274 |             (CryptoPreference::Prefer, false) => Some(CryptoMode::Prefer),
275 |             (CryptoPreference::IfRequired, true) => Some(CryptoMode::Encrypt),
276 |             (CryptoPreference::IfRequired, false) => Some(CryptoMode::Plain),
277 |             (CryptoPreference::Never, true) => None,
278 |             (CryptoPreference::Never, false) => Some(CryptoMode::Plain),
279 |         }
280 |     }
281 | }
282 | 
283 | #[derive(Debug, Error)]
284 | pub(crate) enum ConfigError {
285 |     #[error("error reading configuration file")]
286 |     Read(#[from] std::io::Error),
287 |     #[error("error parsing configuration file")]
288 |     Parse(#[from] toml::de::Error),
289 | }
290 | 
291 | fn default_batch_jobs() -> NonZeroUsize {
292 |     NonZeroUsize::new(50).expect("default general.batch-jobs should be nonzero")
293 | }
294 | 
295 | fn default_numwant() -> NonZeroU32 {
296 |     NonZeroU32::new(50).expect("default trackers.numwant should be nonzero")
297 | }
298 | 
299 | fn default_tracker_jobs_per_magnet() -> NonZeroUsize {
300 |     NonZeroUsize::new(30).expect("default trackers.jobs-per-magnet should be nonzero")
301 | }
302 | 
303 | fn default_announce_timeout() -> Duration {
304 |     Duration::from_secs(30)
305 | }
306 | 
307 | fn default_shutdown_timeout() -> Duration {
308 |     Duration::from_secs(3)
309 | }
310 | 
311 | fn default_peer_jobs_per_magnet() -> NonZeroUsize {
312 |     NonZeroUsize::new(30).expect("default peers.jobs-per-magnet should be nonzero")
313 | }
314 | 
315 | fn default_handshake_timeout() -> Duration {
316 |     Duration::from_secs(60)
317 | }
318 | 
319 | fn default_dh_exchange_timeout() -> Duration {
320 |     crate::peer::msepe::DEFAULT_DH_EXCHANGE_TIMEOUT
321 | }
322 | 
323 | fn deserialize_seconds<'de, D>(deserializer: D) -> Result<Duration, D::Error>
324 | where
325 |     D: Deserializer<'de>,
326 | {
327 |     u64::deserialize(deserializer).map(Duration::from_secs)
328 | }
329 | 
330 | #[cfg(test)]
331 | mod tests {
332 |     use super::*;
333 |     use std::io::Write;
334 |     use tempfile::NamedTempFile;
335 | 
336 |     #[test]
337 |     fn default_config() {
338 |         let cfg = Config::default();
339 |         assert_eq!(
340 |             cfg,
341 |             Config {
342 |                 general: GeneralConfig {
343 |                     batch_jobs: NonZeroUsize::new(50).unwrap(),
344 |                     encrypt: CryptoPreference::Prefer,
345 |                 },
346 |                 trackers: TrackersConfig {
347 |                     local_port: LocalPort::default(),
348 |                     numwant: NonZeroU32::new(50).unwrap(),
349 |                     jobs_per_magnet: NonZeroUsize::new(30).unwrap(),
350 |                     announce_timeout: Duration::from_secs(30),
351 |                     shutdown_timeout: Duration::from_secs(3),
352 |                 },
353 |                 peers: PeersConfig {
354 |                     jobs_per_magnet: NonZeroUsize::new(30).unwrap(),
355 |                     handshake_timeout: Duration::from_secs(60),
356 |                     dh_exchange_timeout: Duration::from_secs(30),
357 |                 }
358 |             }
359 |         );
360 |     }
361 | 
362 |     fn load_config(cfg: &str) -> Result<Config, ConfigError> {
363 |         let mut tmpfile = NamedTempFile::new().unwrap();
364 |         tmpfile.write_all(cfg.as_bytes()).unwrap();
365 |         tmpfile.flush().unwrap();
366 |         Config::load(tmpfile.path())
367 |     }
368 | 
369 |     #[test]
370 |     fn empty_config() {
371 |         let cfg = load_config("").unwrap();
372 |         assert_eq!(cfg, Config::default());
373 |     }
374 | 
375 |     #[test]
376 |     fn int_local_port() {
377 |         let cfg = load_config("[trackers]\nlocal-port = 60069\n").unwrap();
378 |         assert_eq!(
379 |             cfg,
380 |             Config {
381 |                 trackers: TrackersConfig {
382 |                     local_port: LocalPort::Constant(60069),
383 |                     ..TrackersConfig::default()
384 |                 },
385 |                 ..Config::default()
386 |             }
387 |         );
388 |     }
389 | 
390 |     #[test]
391 |     fn int_str_local_port() {
392 |         let cfg = load_config("[trackers]\nlocal-port = \"60069\"\n").unwrap();
393 |         assert_eq!(
394 |             cfg,
395 |             Config {
396 |                 trackers: TrackersConfig {
397 |                     local_port: LocalPort::Constant(60069),
398 |                     ..TrackersConfig::default()
399 |                 },
400 |                 ..Config::default()
401 |             }
402 |         );
403 |     }
404 | 
405 |     #[test]
406 |     fn local_port_range() {
407 |         let cfg = load_config("[trackers]\nlocal-port = \"3000-4000\"\n").unwrap();
408 |         assert_eq!(
409 |             cfg,
410 |             Config {
411 |                 trackers: TrackersConfig {
412 |                     local_port: LocalPort::Range {
413 |                         low: 3000,
414 |                         high: 4000
415 |                     },
416 |                     ..TrackersConfig::default()
417 |                 },
418 |                 ..Config::default()
419 |             }
420 |         );
421 |     }
422 | 
423 |     #[test]
424 |     fn local_port_spaced_range() {
425 |         let cfg = load_config("[trackers]\nlocal-port = \"3000 - 4000\"\n").unwrap();
426 |         assert_eq!(
427 |             cfg,
428 |             Config {
429 |                 trackers: TrackersConfig {
430 |                     local_port: LocalPort::Range {
431 |                         low: 3000,
432 |                         high: 4000
433 |                     },
434 |                     ..TrackersConfig::default()
435 |                 },
436 |                 ..Config::default()
437 |             }
438 |         );
439 |     }
440 | 
441 |     #[test]
442 |     fn local_port_eq_range() {
443 |         let cfg = load_config("[trackers]\nlocal-port = \"3000-3000\"\n").unwrap();
444 |         assert_eq!(
445 |             cfg,
446 |             Config {
447 |                 trackers: TrackersConfig {
448 |                     local_port: LocalPort::Range {
449 |                         low: 3000,
450 |                         high: 3000
451 |                     },
452 |                     ..TrackersConfig::default()
453 |                 },
454 |                 ..Config::default()
455 |             }
456 |         );
457 |     }
458 | 
459 |     #[test]
460 |     fn descending_local_port_range() {
461 |         assert!(load_config("[trackers]\nlocal-port = \"4000-3000\"\n").is_err());
462 |     }
463 | 
464 |     #[test]
465 |     fn zero_duration() {
466 |         let cfg = load_config("[trackers]\nshutdown-timeout = 0\n").unwrap();
467 |         assert_eq!(
468 |             cfg,
469 |             Config {
470 |                 trackers: TrackersConfig {
471 |                     shutdown_timeout: Duration::from_secs(0),
472 |                     ..TrackersConfig::default()
473 |                 },
474 |                 ..Config::default()
475 |             }
476 |         );
477 |     }
478 | 
479 |     #[test]
480 |     fn full_config() {
481 |         let cfg = load_config(concat!(
482 |             "[general]\n",
483 |             "batch-jobs = 100\n",
484 |             "encrypt = \"if-required\"\n",
485 |             "\n",
486 |             "[trackers]\n",
487 |             "announce-timeout = 45\n",
488 |             "jobs-per-magnet = 42\n",
489 |             "local-port = \"10000-65535\"\n",
490 |             "numwant = 75\n",
491 |             "shutdown-timeout = 5\n",
492 |             "\n",
493 |             "[peers]\n",
494 |             "dh-exchange-timeout = 10\n",
495 |             "handshake-timeout = 120\n",
496 |             "jobs-per-magnet = 23\n",
497 |         ))
498 |         .unwrap();
499 |         assert_eq!(
500 |             cfg,
501 |             Config {
502 |                 general: GeneralConfig {
503 |                     batch_jobs: NonZeroUsize::new(100).unwrap(),
504 |                     encrypt: CryptoPreference::IfRequired,
505 |                 },
506 |                 trackers: TrackersConfig {
507 |                     local_port: LocalPort::Range {
508 |                         low: 10000,
509 |                         high: 65535
510 |                     },
511 |                     numwant: NonZeroU32::new(75).unwrap(),
512 |                     jobs_per_magnet: NonZeroUsize::new(42).unwrap(),
513 |                     announce_timeout: Duration::from_secs(45),
514 |                     shutdown_timeout: Duration::from_secs(5),
515 |                 },
516 |                 peers: PeersConfig {
517 |                     jobs_per_magnet: NonZeroUsize::new(23).unwrap(),
518 |                     handshake_timeout: Duration::from_secs(120),
519 |                     dh_exchange_timeout: Duration::from_secs(10),
520 |                 }
521 |             }
522 |         );
523 |     }
524 | 
525 |     #[test]
526 |     fn generate_local_port_single_range() {
527 |         let lp = LocalPort::Range {
528 |             low: 1025,
529 |             high: 1025,
530 |         };
531 |         assert_eq!(lp.generate(rand::rng()), 1025);
532 |     }
533 | }
534 | 
--------------------------------------------------------------------------------
/src/consts.rs:
--------------------------------------------------------------------------------
1 | use crate::peer::extensions::Extension;
2 | 
3 | /// "left" value to use when announcing to a tracker for a torrent we have only
4 | /// the magnet link of
5 | pub(crate) const LEFT: u64 = 65535;
6 | 
7 | /// Prefix for generated peer IDs (calculated from package version by `build.rs` script)
8 | pub(crate) static PEER_ID_PREFIX: &str = env!("PEER_ID_PREFIX");
9 | 
10 | /// Size of buffer for receiving incoming UDP packets. Any packets longer than
11 | /// this are truncated.
12 | pub(crate) const UDP_PACKET_LEN: usize = 65535;
13 | 
14 | /// Maximum metadata size to accept
15 | pub(crate) const MAX_INFO_LENGTH: usize = 20 << 20; // 20 MiB
16 | 
17 | /// BitTorrent protocol extensions supported by demagnetize
18 | pub(crate) const SUPPORTED_EXTENSIONS: [Extension; 2] = [Extension::Bep10, Extension::Fast];
19 | 
20 | /// Extended message ID to declare for receiving BEP 9 messages
21 | pub(crate) const UT_METADATA: u8 = 42;
22 | 
23 | /// Client string to send in extended handshakes and to use as the "Created by"
24 | /// field in Torrent files
25 | pub(crate) static CLIENT: &str = concat!(env!("CARGO_PKG_NAME"), " ", env!("CARGO_PKG_VERSION"));
26 | 
27 | /// Maximum length of a message to accept from a peer
28 | pub(crate) const MAX_PEER_MSG_LEN: usize = 65535;
29 | 
--------------------------------------------------------------------------------
/src/magnet.rs:
--------------------------------------------------------------------------------
1 | use crate::app::App;
2 | use crate::asyncutil::{UniqueByExt, WorkerNursery};
3 | use crate::torrent::{PathTemplate, TorrentFile};
4 | use crate::tracker::{Tracker, TrackerUrlError};
5 | use crate::types::{InfoHash, InfoHashError};
6 | use crate::util::ErrorChain;
7 | use futures_util::stream::{iter, StreamExt};
8 | use patharg::InputArg;
9 | use std::fmt;
10 | use std::str::FromStr;
11 | use std::sync::Arc;
12 | use thiserror::Error;
13 | use url::Url;
14 | 
15 | #[derive(Clone, Debug, Eq, PartialEq)]
16 | pub(crate) struct Magnet {
17 |     info_hash: InfoHash,
18 |     display_name: Option<String>,
19 |     trackers: Vec<Arc<Tracker>>,
20 | }
21 | 
22 | impl Magnet {
23 |     fn info_hash(&self) -> InfoHash {
24 |         self.info_hash
25 |     }
26 | 
27 |     fn display_name(&self) -> Option<&str> {
28 |         self.display_name.as_deref()
29 |     }
30 | 
31 |     fn trackers(&self) -> &[Arc<Tracker>] {
32 |         &self.trackers
33 |     }
34 | 
35 |     pub(crate) async fn get_torrent_file(
36 |         &self,
37 |         app: Arc<App>,
38 |     ) -> Result<TorrentFile, GetInfoError> {
39 |         log::info!("Fetching metadata info for {self}");
40 |         let info_hash = self.info_hash();
41 |         let (tracker_nursery, peer_stream) = WorkerNursery::new(app.cfg.trackers.jobs_per_magnet);
42 |         for tracker in self.trackers() {
43 |             let tracker = Arc::clone(tracker);
44 |             let app = Arc::clone(&app);
45 |             let display = self.to_string();
46 |             tracker_nursery
47 |                 .spawn(async move {
48 |                     match tracker.peer_getter(info_hash, app).run().await {
49 |                         Ok(peers) => iter(peers),
50 |                         Err(e) => {
51 |                             log::warn!(
52 |                                 "Error communicating with {} for {}: {}",
53 |                                 tracker,
54 |                                 display,
55 |                                 ErrorChain(e)
56 |                             );
57 |                             iter(Vec::new())
58 |                         }
59 |                     }
60 |                 })
61 |                 .expect("tracker nursery should not be closed");
62 |         }
63 |         drop(tracker_nursery);
64 |         // Weed out duplicate peers, ignoring differences in peer IDs and
65 |         // requires_crypto fields … for now:
66 |         let mut peer_stream = peer_stream.flatten().unique_by(|peer| peer.address);
67 |         let (peer_tasks, mut receiver) = WorkerNursery::new(app.cfg.peers.jobs_per_magnet);
68 |         let peer_job = tokio::spawn(async move {
69 |             while let Some(peer) = peer_stream.next().await {
70 |                 peer_tasks
71 |                     .spawn({
72 |                         let app = Arc::clone(&app);
73 |                         async move {
74 |                             let r = peer.info_getter(info_hash, app).run().await;
75 |                             (peer, r)
76 |                         }
77 |                     })
78 |                     .expect("peer task nursery should not be closed");
79 |             }
80 |             // peer_tasks is dropped here, allowing the nursery to close.
81 |         });
82 |         while let Some((peer, r)) = receiver.next().await {
83 |             match r {
84 |                 Ok(info) => {
85 |                     let tf = TorrentFile::new(info, self.trackers.clone());
86 |                     peer_job.abort();
87 |                     return Ok(tf);
88 |                 }
89 |                 Err(e) => log::warn!(
90 |                     "Failed to fetch info for {} from {}: {}",
91 |                     self,
92 |                     peer,
93 |                     ErrorChain(e)
94 |                 ),
95 |             }
96 |         }
97 |         Err(GetInfoError)
98 |     }
99 | 
100 |     pub(crate) async fn download_torrent_file(
101 |         &self,
102 |         template: Arc<PathTemplate>,
103 |         app: Arc<App>,
104 |     ) -> Result<(), DownloadInfoError> {
105 |         let tf = self.get_torrent_file(app).await?;
106 |         tf.save(&template).await?;
107 |         Ok(())
108 |     }
109 | }
110 | 
111 | impl FromStr for Magnet {
112 |     type Err = MagnetError;
113 | 
114 |     fn from_str(s: &str) -> Result<Magnet, MagnetError> {
115 |         let url = Url::parse(s)?;
116 |         if url.scheme() != "magnet" {
117 |             return Err(MagnetError::NotMagnet);
118 |         }
119 |         let mut info_hash = None;
120 |         let mut dn = None;
121 |         let mut trackers = Vec::new();
122 |         for (k, v) in url.query_pairs() {
123 |             match k.as_ref() {
124 |                 "xt" => {
125 |                     if info_hash.is_none() {
126 |                         info_hash = Some(parse_xt(&v)?);
127 |                     } else {
128 |                         return Err(MagnetError::MultipleXt);
129 |                     }
130 |                 }
131 |                 "dn" => {
132 |                     let _ = dn.insert(v);
133 |                 }
134 |                 "tr" => trackers.push(Arc::new(v.parse::<Tracker>()?)),
135 |                 _ => (),
136 |             }
137 |         }
138 |         let Some(info_hash) = info_hash else {
139 |             return Err(MagnetError::NoXt);
140 |         };
141 |         if trackers.is_empty() {
142 |             return Err(MagnetError::NoTrackers);
143 |         }
144 |         Ok(Magnet {
145 |             info_hash,
146 |             display_name: dn.map(String::from),
147 |             trackers,
148 |         })
149 |     }
150 | }
151 | 
152 | impl fmt::Display for Magnet {
153 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
154 |         if let Some(dn) = self.display_name() {
155 |             write!(f, "{dn:?} ({})", self.info_hash)
156 |         } else {
157 |             write!(f, "{}", self.info_hash)
158 |         }
159 |     }
160 | }
161 | 
162 | #[derive(Clone, Debug, Eq, Error, PartialEq)]
163 | pub(crate) enum MagnetError {
164 |     #[error("invalid URI")]
165 |     Url(#[from] url::ParseError),
166 |     #[error("not a magnet URI")]
167 |     NotMagnet,
168 |     #[error("magnet URI lacks \"xt\" parameter")]
169 |     NoXt,
170 |     #[error("invalid \"xt\" parameter")]
171 |     InvalidXt(#[from] XtError),
172 |     #[error("magnet URI has multiple \"xt\" parameters")]
173 |     MultipleXt,
174 |     #[error("no trackers given in magnet URI")]
175 |     NoTrackers,
176 |     #[error("invalid \"tr\" parameter")]
177 |     InvalidTracker(#[from] TrackerUrlError),
178 | }
179 | 
180 | fn parse_xt(xt: &str) -> Result<InfoHash, XtError> {
181 |     let Some(s) = xt.strip_prefix("urn:") else {
182 |         return Err(XtError::NotUrn);
183 |     };
184 |     let Some(s) = s.strip_prefix("btih:") else {
185 |         return Err(XtError::NotBtih);
186 |     };
187 |     Ok(s.parse::<InfoHash>()?)
188 | }
189 | 
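// Worked example: for the magnet link used in this module's tests, the "xt"
// parameter is
//
//     urn:btih:28c55196f57753c40aceb6fb58617e6995a7eddb
//
// and parse_xt() strips the "urn:" and "btih:" prefixes, then parses the
// remaining 40 hex characters as an InfoHash.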
190 | #[derive(Copy, Clone, Debug, Eq, Error, PartialEq)]
191 | pub(crate) enum XtError {
192 |     #[error("\"xt\" parameter is not a URN")]
193 |     NotUrn,
194 |     #[error("\"xt\" parameter is not in the \"btih\" namespace")]
195 |     NotBtih,
196 |     #[error("\"xt\" parameter contains invalid info hash")]
197 |     InfoHash(#[from] InfoHashError),
198 | }
199 | 
200 | #[derive(Copy, Clone, Debug, Error, Eq, PartialEq)]
201 | #[error("no peer returned metadata info")]
202 | pub(crate) struct GetInfoError;
203 | 
204 | #[derive(Debug, Error)]
205 | pub(crate) enum DownloadInfoError {
206 |     #[error(transparent)]
207 |     Get(#[from] GetInfoError),
208 |     #[error("failed to save torrent file")]
209 |     Save(#[from] std::io::Error),
210 | }
211 | 
212 | pub(crate) async fn parse_magnets_file(input: InputArg) -> Result<Vec<Magnet>, MagnetsFileError> {
213 |     let mut lines = input
214 |         .async_lines()
215 |         .await
216 |         .map_err(MagnetsFileError::Open)?
217 |         .enumerate();
218 |     let mut magnets = Vec::new();
219 |     while let Some((i, r)) = lines.next().await {
220 |         let ln = r.map_err(MagnetsFileError::Read)?;
221 |         let ln = ln.trim();
222 |         if ln.is_empty() || ln.starts_with('#') {
223 |             continue;
224 |         }
225 |         match ln.parse::<Magnet>() {
226 |             Ok(m) => magnets.push(m),
227 |             Err(e) => {
228 |                 return Err(MagnetsFileError::Parse {
229 |                     lineno: i + 1,
230 |                     source: e,
231 |                 })
232 |             }
233 |         }
234 |     }
235 |     Ok(magnets)
236 | }
237 | 
238 | #[derive(Debug, Error)]
239 | pub(crate) enum MagnetsFileError {
240 |     #[error("failed to open file")]
241 |     Open(#[source] std::io::Error),
242 |     #[error("failed reading from file")]
243 |     Read(#[source] std::io::Error),
244 |     #[error("invalid magnet link on line {lineno}")]
245 |     Parse { lineno: usize, source: MagnetError },
246 | }
247 | 
248 | #[cfg(test)]
249 | mod tests {
250 |     use super::*;
251 | 
252 |     #[test]
253 |     fn test_parse_magnet() {
254 |         let magnet = "magnet:?xt=urn:btih:28c55196f57753c40aceb6fb58617e6995a7eddb&dn=debian-11.2.0-amd64-netinst.iso&tr=http%3A%2F%2Fbttracker.debian.org%3A6969%2Fannounce".parse::<Magnet>().unwrap();
255 |         assert_eq!(
256 |             magnet.info_hash().as_bytes(),
257 |             b"\x28\xC5\x51\x96\xF5\x77\x53\xC4\x0A\xCE\xB6\xFB\x58\x61\x7E\x69\x95\xA7\xED\xDB"
258 |         );
259 |         assert_eq!(
260 |             magnet.display_name(),
261 |             Some("debian-11.2.0-amd64-netinst.iso")
262 |         );
263 |         assert_eq!(
264 |             magnet.trackers(),
265 |             [Arc::new(
266 |                 "http://bttracker.debian.org:6969/announce"
267 |                     .parse::<Tracker>()
268 |                     .unwrap()
269 |             )]
270 |         );
271 |     }
272 | }
273 | 
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | mod app;
2 | mod asyncutil;
3 | mod config;
4 | mod consts;
5 | mod magnet;
6 | mod peer;
7 | mod torrent;
8 | mod tracker;
9 | mod types;
10 | mod util;
11 | use crate::app::App;
12 | use crate::asyncutil::WorkerNursery;
13 | use crate::config::{Config, ConfigError};
14 | use crate::magnet::{parse_magnets_file, Magnet};
15 | use crate::peer::{CryptoMode, Peer};
16 | use crate::torrent::{PathTemplate, TorrentFile};
17 | use crate::tracker::{Tracker, TrackerCrypto};
18 | use crate::types::InfoHash;
19 | use crate::util::ErrorChain;
20 | use anstream::AutoStream;
21 | use anstyle::{AnsiColor, Style};
22 | use clap::{Parser, Subcommand};
23 | use futures_util::StreamExt;
24 | use log::{Level, LevelFilter};
25 | use patharg::InputArg;
26 | use std::path::PathBuf;
27 | use std::process::ExitCode;
28 | use std::sync::Arc;
29 | 
30 | /// Convert magnet links to .torrent files
31 | #[derive(Clone, Debug, Eq, Parser, PartialEq)]
32 | #[command(version)]
33 | struct Arguments {
34 |     /// Read program configuration from the given file
35 |     #[arg(short, long, value_name = "FILE")]
36 |     config: Option<PathBuf>,
37 | 
38 |     /// Set logging level
39 |     #[arg(
40 |         short,
41 |         long,
42 |         default_value = "INFO",
43 |         value_name = "OFF|ERROR|WARN|INFO|DEBUG|TRACE"
44 |     )]
45 |     log_level: LevelFilter,
46 | 
47 |     /// Do not read any configuration files
48 |     #[arg(long, conflicts_with = "config")]
49 |     no_config: bool,
50 | 
51 |     #[command(subcommand)]
52 |     command: Command,
53 | }
54 | 
55 | impl Arguments {
56 |     async fn run(mut self) -> ExitCode {
57 |         init_logging(self.log_level);
58 |         let cfg = if self.no_config {
59 |             Config::default()
60 |         } else {
61 |             let (cfgpath, defpath) = if let Some(p) = self.config.take() {
62 |                 (p, false)
63 |             } else if let Some(p) = Config::default_path() {
64 |                 (p, true)
65 |             } else {
66 |                 log::error!("Failed to locate configuration file: could not determine user's home directory");
67 |                 return ExitCode::FAILURE;
68 |             };
69 |             log::debug!(
70 |                 "Reading program configuration from {} ...",
71 |                 cfgpath.display()
72 |             );
73 |             match Config::load(&cfgpath) {
74 |                 Ok(cfg) => cfg,
75 |                 Err(ConfigError::Read(e))
76 |                     if e.kind() == std::io::ErrorKind::NotFound && defpath =>
77 |                 {
78 |                     log::debug!(
79 |                         "Default configuration file does not exist; using default settings"
80 |                     );
81 |                     Config::default()
82 |                 }
83 |                 Err(e) => {
84 |                     log::error!(
85 |                         "Failed to get program configuration from file {}: {}",
86 |                         cfgpath.display(),
87 |                         ErrorChain(e)
88 |                     );
89 |                     return ExitCode::FAILURE;
90 |                 }
91 |             }
92 |         };
93 |         let app = Arc::new(App::new(cfg, rand::rng()));
94 |         log::debug!("Using local peer details: {}", app.local);
95 |         let r = self.command.run(Arc::clone(&app)).await;
96 |         app.shutdown().await;
97 |         r
98 |     }
99 | }
100 | 
101 | #[derive(Clone, Debug, Eq, PartialEq, Subcommand)]
102 | enum Command {
103 |     /// Download the .torrent file for a single magnet link
104 |     Get {
105 |         /// Save the .torrent file to the given path.
106 |         ///
107 |         /// The path may contain a `{name}` placeholder, which will be replaced
108 |         /// by the (sanitized) name of the torrent, and/or a `{hash}`
109 |         /// placeholder, which will be replaced by the torrent's info hash in
110 |         /// hexadecimal.
111 |         #[arg(short, long, default_value = "{name}.torrent")]
112 |         outfile: PathTemplate,
113 | 
114 |         magnet: Magnet,
115 |     },
116 |     /// Download the .torrent files for a file of magnet links
117 |     Batch {
118 |         /// Save the .torrent files to the given path template.
119 |         ///
120 |         /// The path template may contain a `{name}` placeholder, which will be
121 |         /// replaced by the (sanitized) name of each torrent, and/or a `{hash}`
122 |         /// placeholder, which will be replaced by each torrent's info hash in
123 |         /// hexadecimal.
124 |         #[arg(short, long, default_value = "{name}.torrent")]
125 |         outfile: PathTemplate,
126 | 
127 |         /// A file listing magnet links, one per line.  Empty lines and lines
128 |         /// starting with '#' are ignored.
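        ///
        /// For example (an illustrative file; the magnet link is the Debian
        /// one used in this crate's tests):
        ///
        ///     # torrents to fetch
        ///     magnet:?xt=urn:btih:28c55196f57753c40aceb6fb58617e6995a7eddb&dn=debian-11.2.0-amd64-netinst.iso&tr=http%3A%2F%2Fbttracker.debian.org%3A6969%2Fannounce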
129 |         file: InputArg,
130 |     },
131 |     /// Fetch peers for an info hash from a specific tracker
132 |     QueryTracker {
133 |         /// Output peers as JSON objects, one per line
134 |         #[arg(short = 'J', long)]
135 |         json: bool,
136 | 
137 |         /// Do not tell the tracker anything about our encryption support
138 |         #[arg(
139 |             long,
140 |             conflicts_with = "require_crypto",
141 |             conflicts_with = "support_crypto"
142 |         )]
143 |         no_crypto: bool,
144 | 
145 |         /// Tell the tracker that we require peers with encryption support
146 |         #[arg(long)]
147 |         require_crypto: bool,
148 | 
149 |         /// Tell the tracker that we support the encrypted peer protocol
150 |         #[arg(long, conflicts_with = "require_crypto")]
151 |         support_crypto: bool,
152 | 
153 |         /// The tracker to scrape, as an HTTP or UDP URL.
154 |         tracker: Tracker,
155 | 
156 |         /// The info hash of the torrent to get peers for.
157 |         ///
158 |         /// This must be either a 40-character hex string or a 32-character
159 |         /// base32 string.
160 |         info_hash: InfoHash,
161 |     },
162 |     /// Fetch torrent metadata for an info hash from a specific peer
163 |     ///
164 |     /// Note that the resulting .torrent file will not contain any trackers.
165 |     QueryPeer {
166 |         /// Create an encrypted connection to the peer
167 |         #[arg(long)]
168 |         encrypt: bool,
169 | 
170 |         /// Create an unencrypted connection to the peer
171 |         #[arg(long, conflicts_with = "encrypt")]
172 |         no_encrypt: bool,
173 | 
174 |         /// Save the .torrent file to the given path.
175 |         ///
176 |         /// The path may contain a `{name}` placeholder, which will be replaced
177 |         /// by the (sanitized) name of the torrent, and/or a `{hash}`
178 |         /// placeholder, which will be replaced by the torrent's info hash in
179 |         /// hexadecimal.
180 |         #[arg(short, long, default_value = "{name}.torrent")]
181 |         outfile: PathTemplate,
182 | 
183 |         /// Attempt to create an encrypted connection to the peer; if that
184 |         /// fails, try again without encryption
185 |         #[arg(long, conflicts_with = "encrypt", conflicts_with = "no_encrypt")]
186 |         prefer_encrypt: bool,
187 | 
188 |         /// The peer to get metadata from, in the form "IP:PORT" (or
189 |         /// "[IP]:PORT" for IPv6).
190 |         peer: Peer,
191 | 
192 |         /// The info hash of the torrent to get metadata for.
193 |         ///
194 |         /// This must be either a 40-character hex string or a 32-character
195 |         /// base32 string.
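        ///
        /// For example, the hex form of the info hash used in this crate's
        /// tests is: 28c55196f57753c40aceb6fb58617e6995a7eddb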
196 |         info_hash: InfoHash,
197 |     },
198 | }
199 | 
200 | impl Command {
201 |     async fn run(self, app: Arc<App>) -> ExitCode {
202 |         match self {
203 |             Command::Get { outfile, magnet } => {
204 |                 if let Err(e) = magnet
205 |                     .download_torrent_file(Arc::new(outfile), Arc::clone(&app))
206 |                     .await
207 |                 {
208 |                     log::error!("Failed to download torrent file: {}", ErrorChain(e));
209 |                     ExitCode::FAILURE
210 |                 } else {
211 |                     ExitCode::SUCCESS
212 |                 }
213 |             }
214 |             Command::Batch { outfile, file } => {
215 |                 let magnets = match parse_magnets_file(file).await {
216 |                     Ok(magnets) => magnets,
217 |                     Err(e) => {
218 |                         log::error!("Error reading magnets file: {}", ErrorChain(e));
219 |                         return ExitCode::FAILURE;
220 |                     }
221 |                 };
222 |                 if magnets.is_empty() {
223 |                     log::info!("No magnet links supplied");
224 |                     return ExitCode::SUCCESS;
225 |                 }
226 |                 let mut success = 0usize;
227 |                 let mut total = 0usize;
228 |                 let outfile = Arc::new(outfile);
229 |                 let (nursery, mut outputs) = WorkerNursery::new(app.cfg.general.batch_jobs);
230 |                 for magnet in magnets {
231 |                     let app = Arc::clone(&app);
232 |                     let outf = Arc::clone(&outfile);
233 |                     nursery
234 |                         .spawn(async move {
235 |                             if let Err(e) = magnet.download_torrent_file(outf, app).await {
236 |                                 log::error!(
237 |                                     "Failed to download torrent file for {magnet}: {}",
238 |                                     ErrorChain(e)
239 |                                 );
240 |                                 false
241 |                             } else {
242 |                                 true
243 |                             }
244 |                         })
245 |                         .expect("worker nursery should not be closed");
246 |                 }
247 |                 drop(nursery);
248 |                 while let Some(b) = outputs.next().await {
249 |                     if b {
250 |                         success += 1;
251 |                     }
252 |                     total += 1;
253 |                 }
254 |                 log::info!(
255 |                     "{success}/{total} magnet links successfully converted to torrent files"
256 |                 );
257 |                 if success == total {
258 |                     ExitCode::SUCCESS
259 |                 } else {
260 |                     ExitCode::FAILURE
261 |                 }
262 |             }
263 |             Command::QueryTracker {
264 |                 json,
265 |                 tracker,
266 |                 info_hash,
267 |                 no_crypto,
268 |                 require_crypto,
269 |                 support_crypto,
270 |             } => {
271 |                 let tracker_crypto = match (require_crypto, support_crypto, no_crypto) {
272 |                     (true, _, _) => Some(TrackerCrypto::Required),
273 |                     (false, true, _) => Some(TrackerCrypto::Supported),
274 |                     (false, false, true) => Some(TrackerCrypto::Plain),
275 |                     (false, false, false) => None,
276 |                 };
277 |                 let r = match tracker
278 |                     .peer_getter(info_hash, Arc::clone(&app))
279 |                     .tracker_crypto(tracker_crypto)
280 |                     .run()
281 |                     .await
282 |                 {
283 |                     Ok(peers) => {
284 |                         for p in peers {
285 |                             if json {
286 |                                 println!("{}", p.display_json());
287 |                             } else {
288 |                                 println!("{}", p.address);
289 |                             }
290 |                         }
291 |                         ExitCode::SUCCESS
292 |                     }
293 |                     Err(e) => {
294 |                         log::error!("Error communicating with tracker: {}", ErrorChain(e));
295 |                         ExitCode::FAILURE
296 |                     }
297 |                 };
298 |                 r
299 |             }
300 |             Command::QueryPeer {
301 |                 outfile,
302 |                 peer,
303 |                 info_hash,
304 |                 encrypt,
305 |                 prefer_encrypt,
306 |                 no_encrypt,
307 |             } => {
308 |                 let crypto_mode = match (encrypt, prefer_encrypt, no_encrypt) {
309 |                     (true, _, _) => Some(CryptoMode::Encrypt),
310 |                     (false, true, _) => Some(CryptoMode::Prefer),
311 |                     (false, false, true) => Some(CryptoMode::Plain),
312 |                     (false, false, false) => None,
313 |                 };
314 |                 match peer
315 |                     .info_getter(info_hash, app)
316 |                     .crypto_mode(crypto_mode)
317 |                     .run()
318 |                     .await
319 |                 {
320 |                     Ok(info) => {
321 |                         let tf = TorrentFile::new(info, Vec::new());
322 |                         if let Err(e) = tf.save(&outfile).await {
323 |                             log::error!("Failed to write to file: {}", ErrorChain(e));
324 |                             ExitCode::FAILURE
325 |                         } else {
326 |                             ExitCode::SUCCESS
327 |                         }
328 |                     }
329 |                     Err(e) => {
330 |                         log::error!("Failed to fetch info from peer: {}", ErrorChain(e));
331 |                         ExitCode::FAILURE
332 |                     }
333 |                 }
334 |             }
335 |         }
336 |     }
337 | }
338 | 
339 | #[tokio::main]
340 | async fn main() -> ExitCode {
341 |     Arguments::parse().run().await
342 | }
343 | 
344 | fn init_logging(log_level: LevelFilter) {
345 |     let stderr: Box<dyn std::io::Write + Send> = Box::new(AutoStream::auto(std::io::stderr()));
346 |     fern::Dispatch::new()
347 |         .format(|out, message, record| {
348 |             use AnsiColor::*;
349 |             let style = match record.level() {
350 |                 Level::Error => Style::new().fg_color(Some(Red.into())),
351 |                 Level::Warn => Style::new().fg_color(Some(Yellow.into())),
352 |                 Level::Info => Style::new().bold(),
353 |                 Level::Debug => Style::new().fg_color(Some(Cyan.into())),
354 |                 Level::Trace => Style::new().fg_color(Some(Green.into())),
355 |             };
356 |             out.finish(format_args!(
357 |                 "{}{} [{:<5}] {}{}",
358 |                 style.render(),
359 |                 chrono::Local::now().format("%H:%M:%S"),
360 |                 record.level(),
361 |                 message,
362 |                 style.render_reset(),
363 |             ));
364 |         })
365 |         .level(LevelFilter::Info)
366 |         .level_for("demagnetize", log_level)
367 |         .chain(stderr)
368 |         .apply()
369 |         .expect("no other logger should have been previously initialized");
370 | }
371 | 
--------------------------------------------------------------------------------
/src/peer/extensions.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{BTreeMap, HashMap};
2 | use std::fmt;
3 | use std::ops::BitAnd;
4 | use std::str::FromStr;
5 | use strum::IntoEnumIterator;
6 | use strum_macros::EnumIter;
7 | use thiserror::Error;
8 | 
9 | #[derive(Copy, Clone, Debug, EnumIter, Eq, Hash, Ord, PartialEq, PartialOrd)]
10 | pub(crate) enum Extension {
11 |     AzureusMessaging = 63, // byte 0, 0x80 (BEP 4)
12 |     LocationAware = 43,    // byte 2, 0x08 (BEP 4)
13 |     Bep10 = 20,            // byte 5, 0x10 (BEP 10)
14 |     Dht = 0,               // byte 7, 0x01 (BEP 4, BEP 5)
15 |     XbtPex = 1,            // byte 7, 0x02 (BEP 4)
16 |     Fast = 2,              // byte 7, 0x04 (BEP 4, BEP 6)
17 |     NatTraversal = 3,      // byte 7, 0x08 (BEP 4)
18 |     HybridV2 = 4,          // byte 7, 0x10 (BEP 4, BEP 52)
19 | }
20 | 
21 | impl Extension {
22 |     fn bit(self) -> u64 {
23 |         1 << (self as u8)
24 |     }
25 | }
26 | 
27 | impl fmt::Display for Extension {
28 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
29 |         use Extension::*;
30 |         match self {
31 |             AzureusMessaging => write!(f, "Azureus Messaging Protocol"),
32 |             LocationAware => write!(f, "BitTorrent Location-aware Protocol"),
33 |             Bep10 => write!(f, "BEP 10 Extension Protocol"),
34 |             Dht => write!(f, "BitTorrent DHT"),
35 |             XbtPex => write!(f, "XBT Peer Exchange"),
36 |             Fast => write!(f, "Fast Extension"),
37 |             NatTraversal => write!(f, "NAT Traversal"),
38 |             HybridV2 => write!(f, "hybrid torrent legacy to v2 upgrade"),
39 |         }
40 |     }
41 | }
42 | 
43 | #[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq)]
44 | pub(super) struct ExtensionSet(u64);
45 | 
46 | impl ExtensionSet {
47 |     pub(super) fn has(&self, ext: Extension) -> bool {
48 |         self.0 & ext.bit() != 0
49 |     }
50 | }
51 | 
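// Worked example of the bit mapping: `Bep10 = 20` sets bit 1 << 20 =
// 0x10_0000 of the 64-bit reserved field, i.e. the 0x10 bit of byte 5
// (counting bytes from the most significant end, as in the enum comments
// above). Combining Bep10, Dht, and Fast therefore yields 0x100005, as
// checked by `test_extension_set` below.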
"Unknown({extset:#018x})")?; 70 | } 71 | if first { 72 | write!(f, "")?; 73 | } 74 | Ok(()) 75 | } 76 | } 77 | 78 | impl BitAnd for ExtensionSet { 79 | type Output = ExtensionSet; 80 | 81 | fn bitand(self, rhs: ExtensionSet) -> ExtensionSet { 82 | ExtensionSet(self.0 & rhs.0) 83 | } 84 | } 85 | 86 | impl From for u64 { 87 | fn from(extset: ExtensionSet) -> u64 { 88 | extset.0 89 | } 90 | } 91 | 92 | impl From for ExtensionSet { 93 | fn from(extset: u64) -> ExtensionSet { 94 | ExtensionSet(extset) 95 | } 96 | } 97 | 98 | impl FromIterator for ExtensionSet { 99 | fn from_iter(iter: I) -> ExtensionSet 100 | where 101 | I: IntoIterator, 102 | { 103 | let mut value = 0; 104 | for ext in iter { 105 | value |= ext.bit(); 106 | } 107 | ExtensionSet(value) 108 | } 109 | } 110 | 111 | #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 112 | pub(crate) enum Bep10Extension { 113 | Metadata, // BEP 9 114 | //Pex, // BEP 11 115 | //Holepunch, // BEP 55 116 | } 117 | 118 | impl FromStr for Bep10Extension { 119 | type Err = Bep10Error; 120 | 121 | fn from_str(s: &str) -> Result { 122 | match s { 123 | "ut_metadata" => Ok(Bep10Extension::Metadata), 124 | //"ut_pex" => Ok(Bep10Extension::Pex), 125 | _ => Err(Bep10Error), 126 | } 127 | } 128 | } 129 | 130 | impl fmt::Display for Bep10Extension { 131 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 132 | match self { 133 | Bep10Extension::Metadata => write!(f, "ut_metadata"), 134 | //Bep10Extension::Pex => write!(f, "ut_pex"), 135 | } 136 | } 137 | } 138 | 139 | #[derive(Copy, Clone, Debug, Error, Eq, PartialEq)] 140 | #[error("unknown extension dict key")] 141 | pub(crate) struct Bep10Error; 142 | 143 | #[derive(Clone, Debug, Eq, PartialEq)] 144 | pub(super) struct Bep10Registry { 145 | to_code: HashMap, 146 | #[allow(clippy::zero_sized_map_values)] 147 | from_code: HashMap, 148 | } 149 | 150 | impl Bep10Registry { 151 | #[allow(clippy::zero_sized_map_values)] 152 | pub(super) fn new() -> Bep10Registry { 153 | Bep10Registry { 154 | to_code: HashMap::new(), 155 | from_code: HashMap::new(), 156 | } 157 | } 158 | 159 | pub(super) fn from_m(m: BTreeMap) -> Result { 160 | let mut registry = Bep10Registry::new(); 161 | for (k, v) in m { 162 | if let Ok(ext) = k.parse::() { 163 | registry.register(ext, v)?; 164 | } 165 | } 166 | Ok(registry) 167 | } 168 | 169 | pub(super) fn to_m(&self) -> BTreeMap { 170 | let mut m = BTreeMap::new(); 171 | for (&ext, &code) in &self.to_code { 172 | m.insert(ext.to_string(), code); 173 | } 174 | m 175 | } 176 | 177 | pub(super) fn contains(&self, ext: Bep10Extension) -> bool { 178 | self.to_code.contains_key(&ext) 179 | } 180 | 181 | pub(super) fn get_message_id(&self, ext: Bep10Extension) -> Option { 182 | self.to_code.get(&ext).copied() 183 | } 184 | 185 | pub(super) fn for_message_id(&self, code: u8) -> Option { 186 | self.from_code.get(&code).copied() 187 | } 188 | 189 | pub(super) fn register( 190 | &mut self, 191 | ext: Bep10Extension, 192 | code: u8, 193 | ) -> Result<(), Bep10RegistryError> { 194 | if code == 0 { 195 | return Err(Bep10RegistryError::Handshake); 196 | } 197 | if self.from_code.contains_key(&code) { 198 | return Err(Bep10RegistryError::Code(code)); 199 | } 200 | if self.to_code.contains_key(&ext) { 201 | return Err(Bep10RegistryError::Ext(ext)); 202 | } 203 | self.from_code.insert(code, ext); 204 | self.to_code.insert(ext, code); 205 | Ok(()) 206 | } 207 | } 208 | 209 | #[derive(Copy, Clone, Debug, Error, Eq, PartialEq)] 210 | pub(crate) enum Bep10RegistryError { 211 | 
#[error("extended message ID 0 listed in \"m\"")] 212 | Handshake, 213 | #[error("extended message ID {0} listed in \"m\" more than once")] 214 | Code(u8), 215 | #[error("extension {0} listed in \"m\" more than once")] 216 | Ext(Bep10Extension), 217 | } 218 | 219 | #[cfg(test)] 220 | mod tests { 221 | use super::*; 222 | 223 | #[test] 224 | fn test_extension_iter() { 225 | use Extension::*; 226 | let mut iter = Extension::iter(); 227 | assert_eq!(iter.next(), Some(AzureusMessaging)); 228 | assert_eq!(iter.next(), Some(LocationAware)); 229 | assert_eq!(iter.next(), Some(Bep10)); 230 | assert_eq!(iter.next(), Some(Dht)); 231 | assert_eq!(iter.next(), Some(XbtPex)); 232 | assert_eq!(iter.next(), Some(Fast)); 233 | assert_eq!(iter.next(), Some(NatTraversal)); 234 | assert_eq!(iter.next(), Some(HybridV2)); 235 | assert_eq!(iter.next(), None); 236 | } 237 | 238 | #[test] 239 | fn test_extension_set() { 240 | let extset = [Extension::Bep10, Extension::Dht, Extension::Fast] 241 | .into_iter() 242 | .collect::(); 243 | assert!(extset.has(Extension::Bep10)); 244 | assert!(extset.has(Extension::Dht)); 245 | assert!(extset.has(Extension::Fast)); 246 | assert!(!extset.has(Extension::LocationAware)); 247 | assert!(!extset.has(Extension::XbtPex)); 248 | assert_eq!( 249 | extset.to_string(), 250 | "BEP 10 Extension Protocol, BitTorrent DHT, Fast Extension" 251 | ); 252 | assert_eq!(u64::from(extset), 0x100005); 253 | assert_eq!(ExtensionSet::from(0x100005u64), extset); 254 | } 255 | 256 | #[test] 257 | fn test_default_extension_set() { 258 | let extset = ExtensionSet::default(); 259 | assert_eq!(u64::from(extset), 0); 260 | assert_eq!(ExtensionSet::from(0), extset); 261 | for ext in Extension::iter() { 262 | assert!(!extset.has(ext)); 263 | } 264 | assert_eq!(extset.to_string(), ""); 265 | } 266 | 267 | #[test] 268 | fn test_extension_set_unknown() { 269 | let extset = ExtensionSet::from(0x8404u64); 270 | for ext in Extension::iter() { 271 | if ext == Extension::Fast { 272 | assert!(extset.has(ext)); 273 | } else { 274 | assert!(!extset.has(ext)); 275 | } 276 | } 277 | assert_eq!( 278 | extset.to_string(), 279 | "Fast Extension, Unknown(0x0000000000008400)" 280 | ); 281 | } 282 | 283 | #[test] 284 | fn test_extension_set_all_unknown() { 285 | let extset = ExtensionSet::from(0x8400u64); 286 | for ext in Extension::iter() { 287 | assert!(!extset.has(ext)); 288 | } 289 | assert_eq!(extset.to_string(), "Unknown(0x0000000000008400)"); 290 | } 291 | } 292 | -------------------------------------------------------------------------------- /src/torrent.rs: -------------------------------------------------------------------------------- 1 | use crate::consts::{CLIENT, MAX_INFO_LENGTH}; 2 | use crate::tracker::Tracker; 3 | use crate::types::InfoHash; 4 | use bendy::decoding::{Decoder, Object}; 5 | use bendy::encoding::ToBencode; 6 | use bytes::{BufMut, Bytes, BytesMut}; 7 | use patharg::OutputArg; 8 | use sha1::{Digest, Sha1}; 9 | use std::borrow::Cow; 10 | use std::fmt::Write; 11 | use std::iter::Peekable; 12 | use std::ops::Range; 13 | use std::path::Path; 14 | use std::str::FromStr; 15 | use std::sync::Arc; 16 | use std::time::{SystemTime, UNIX_EPOCH}; 17 | use thiserror::Error; 18 | use tokio::fs::create_dir_all; 19 | 20 | #[derive(Clone, Debug, Eq, PartialEq)] 21 | pub(crate) struct TorrentInfo { 22 | info_hash: InfoHash, 23 | data: Bytes, 24 | } 25 | 26 | impl TorrentInfo { 27 | pub(crate) fn name(&self) -> Option> { 28 | let mut decoder = Decoder::new(&self.data); 29 | let Ok(Some(obj)) = 
        let Ok(Some(obj)) = decoder.next_object() else {
            unreachable!();
        };
        let mut dd = obj
            .try_into_dictionary()
            .expect("Torrent info should be a dict");
        while let Some(kv) = dd.next_pair().ok()? {
            if let (b"name", v) = kv {
                return Some(String::from_utf8_lossy(v.try_into_bytes().ok()?));
            }
        }
        None
    }
}

impl From<TorrentInfo> for Bytes {
    fn from(info: TorrentInfo) -> Bytes {
        info.data
    }
}

#[derive(Clone, Debug)]
pub(crate) struct TorrentInfoBuilder {
    info_hash: InfoHash,
    hasher: Sha1,
    data: BytesMut,
    sizes: Vec<usize>,
    index_iter: Peekable<Range<u32>>,
}

impl TorrentInfoBuilder {
    const PIECE_LENGTH: usize = 16 << 10; // 16 KiB

    pub(crate) fn new(
        info_hash: InfoHash,
        length: u32,
    ) -> Result<TorrentInfoBuilder, ConstructError> {
        let Ok(lgth) = usize::try_from(length) else {
            return Err(ConstructError::TooLarge(length));
        };
        if lgth > MAX_INFO_LENGTH {
            return Err(ConstructError::TooLarge(length));
        }
        let hasher = Sha1::new();
        let data = BytesMut::with_capacity(lgth);
        let mut sizes = vec![Self::PIECE_LENGTH; lgth / Self::PIECE_LENGTH];
        let overflow = lgth % Self::PIECE_LENGTH;
        if overflow > 0 {
            sizes.push(overflow);
        }
        let piece_qty =
            u32::try_from(sizes.len()).expect("number of metadata pieces should fit in a u32");
        Ok(TorrentInfoBuilder {
            info_hash,
            hasher,
            data,
            sizes,
            index_iter: (0..piece_qty).peekable(),
        })
    }

    pub(crate) fn push(&mut self, piece: Bytes) -> Result<(), PushError> {
        let Some(index) = self.index_iter.next() else {
            return Err(PushError::TooManyPieces);
        };
        let index = usize::try_from(index).expect("piece indices should fit in a usize");
        if piece.len() != self.sizes[index] {
            return Err(PushError::Length {
                index,
                expected: self.sizes[index],
                got: piece.len(),
            });
        }
        self.hasher.update(&piece);
        self.data.extend(piece);
        Ok(())
    }

    pub(crate) fn next_piece(&mut self) -> Option<u32> {
        self.index_iter.peek().copied()
    }

    pub(crate) fn build(self) -> Result<TorrentInfo, BuildError> {
        let left = self.index_iter.count();
        if left > 0 {
            return Err(BuildError::NotFinished { left });
        }
        let got_hash = Bytes::from_iter(self.hasher.finalize());
        if got_hash != self.info_hash.as_bytes() {
            return Err(BuildError::Digest {
                expected: self.info_hash,
                got: got_hash,
            });
        }
        let data = self.data.freeze();
        check_bencode_dict(&data)?;
        Ok(TorrentInfo {
            info_hash: self.info_hash,
            data,
        })
    }
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) struct TorrentFile {
    info: TorrentInfo,
    trackers: Vec<Arc<Tracker>>,
    creation_date: i64,
    created_by: String,
}

impl TorrentFile {
    pub(crate) fn new(info: TorrentInfo, trackers: Vec<Arc<Tracker>>) -> TorrentFile {
        TorrentFile {
            trackers,
            created_by: CLIENT.into(),
            creation_date: unix_now(),
            info,
        }
    }

    pub(crate) async fn save(self, template: &PathTemplate) -> std::io::Result<()> {
        let name = sanitize(self.info.name().as_deref().unwrap_or("NONAME"));
        let path = OutputArg::from_arg(template.format(&name, self.info.info_hash));
        if path.is_stdout() {
            log::info!(
                "Writing torrent for info hash {} to stdout",
                self.info.info_hash
            );
        } else {
            log::info!(
                "Saving torrent for info hash {} to file {}",
                self.info.info_hash,
                path
            );
        }
        if let Some(parent) = path.path_ref().and_then(|p| p.parent()) {
            if parent != Path::new("") {
                create_dir_all(parent).await?;
            }
        }
        let buf = Bytes::from(self);
        path.async_write(buf).await
    }
}

macro_rules! put_kv {
    ($buf:ident, $key:literal, $value:expr) => {
        $buf.put(
            $key.to_bencode()
                .expect("string keys should be bencodable")
                .as_slice(),
        );
        $buf.put(
            $value
                .to_bencode()
                .expect("torrent file values should be bencodable")
                .as_slice(),
        );
    };
}

impl From<TorrentFile> for Bytes {
    fn from(torrent: TorrentFile) -> Bytes {
        let mut buf = BytesMut::new();
        buf.put_u8(b'd');
        if !torrent.trackers.is_empty() {
            put_kv!(
                buf,
                "announce-list",
                torrent
                    .trackers
                    .into_iter()
                    .map(|tr| vec![tr.url_string()])
                    .collect::<Vec<Vec<String>>>()
            );
        }
        put_kv!(buf, "created by", torrent.created_by);
        put_kv!(buf, "creation date", torrent.creation_date);
        buf.put(
            "info"
                .to_bencode()
                .expect("string should be bencodable")
                .as_slice(),
        );
        buf.put(Bytes::from(torrent.info));
        buf.put_u8(b'e');
        buf.freeze()
    }
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) struct PathTemplate(Vec<TemplateElement>);

impl PathTemplate {
    pub(crate) fn format(&self, name: &str, info_hash: InfoHash) -> String {
        let mut buf = String::new();
        for elem in &self.0 {
            match elem {
                TemplateElement::Literal(s) => buf.push_str(s),
                TemplateElement::Name => buf.push_str(name),
                TemplateElement::Hash => {
                    write!(buf, "{info_hash}").expect("fmt::writing to a String should not fail");
                }
            }
        }
        buf
    }
}

impl FromStr for PathTemplate {
    type Err = PathTemplateError;

    fn from_str(s: &str) -> Result<PathTemplate, PathTemplateError> {
        let mut elems = Vec::new();
        let mut buf = String::new();
        let mut brace_iter = s.match_indices('{');
        let mut prev_end = 0;
        while let Some((i, _)) = brace_iter.next() {
            debug_assert!(
                prev_end <= i,
                "prev_end={prev_end:?} was unexpectedly greater than i={i:?}"
            );
            buf.push_str(&s[prev_end..i]);
            match s[i..]
                .char_indices()
                .skip(1)
                .find(|&(_, ch)| !(ch.is_ascii_alphanumeric() || ch == '_'))
            {
                Some((1, '{')) => {
                    buf.push('{');
                    let _ = brace_iter.next();
                    prev_end = i + 2;
                }
                Some((j, '}')) => {
                    if !buf.is_empty() {
                        elems.push(TemplateElement::Literal(buf.replace("}}", "}")));
                        buf.clear();
                    }
                    match &s[(i + 1)..(i + j)] {
                        "name" => elems.push(TemplateElement::Name),
                        "hash" => elems.push(TemplateElement::Hash),
                        field => return Err(PathTemplateError::UnknownField(field.into())),
                    }
                    prev_end = i + j + 1;
                }
                Some(_) => return Err(PathTemplateError::InvalidField(i)),
                None => return Err(PathTemplateError::Unmatched(i)),
            }
        }
        buf.push_str(&s[prev_end..]);
        if !buf.is_empty() {
            elems.push(TemplateElement::Literal(buf.replace("}}", "}")));
        }
        Ok(PathTemplate(elems))
    }
}

#[derive(Clone, Debug, Eq, PartialEq)]
enum TemplateElement {
    Literal(String),
    Name,
    Hash,
}

#[derive(Clone, Debug, Error, Eq, PartialEq)]
pub(crate) enum PathTemplateError {
    #[error("unmatched brace at byte index {0}")]
    Unmatched(usize),
    #[error("malformed placeholder at byte index {0}")]
    InvalidField(usize),
    #[error("unknown placeholder {0:?}")]
    UnknownField(String),
}

fn check_bencode_dict(buf: &Bytes) -> Result<(), BencodeDictError> {
    let mut decoder = Decoder::new(buf);
    match decoder.next_object() {
        Ok(Some(Object::Dict(mut dd))) => {
            if dd.consume_all().is_err() {
                return Err(BencodeDictError::Syntax);
            }
        }
        Ok(Some(_)) => return Err(BencodeDictError::NotADict),
        Ok(None) => return Err(BencodeDictError::Empty),
        Err(_) => return Err(BencodeDictError::Syntax),
    }
    if !matches!(decoder.next_object(), Ok(None)) {
        return Err(BencodeDictError::Trailing);
    }
    Ok(())
}

fn unix_now() -> i64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(d) => i64::try_from(d.as_secs()).unwrap_or(i64::MAX),
        Err(e) => i64::try_from(e.duration().as_secs())
            .map(|i| -i)
            .unwrap_or(i64::MIN),
    }
}

fn sanitize(s: &str) -> String {
    static PRINTABLE_UNSANITARY: &str = "/\\<>:|\"?*";
    s.chars()
        .map(|ch| {
            if ch < ' ' || PRINTABLE_UNSANITARY.contains(ch) {
                '_'
            } else {
                ch
            }
        })
        .collect()
}

#[derive(Copy, Clone, Debug, Error, Eq, PartialEq)]
pub(crate) enum ConstructError {
    #[error("metadata size of {0} exceeds {max} limit", max = MAX_INFO_LENGTH)]
    TooLarge(u32),
}

#[derive(Copy, Clone, Debug, Error, Eq, PartialEq)]
pub(crate) enum PushError {
    #[error("too many metadata pieces fetched")]
    TooManyPieces,
    #[error("wrong length for metadata piece {index}: expected {expected}, got {got}")]
    Length {
        index: usize,
        expected: usize,
        got: usize,
    },
}

#[derive(Clone, Debug, Error, Eq, PartialEq)]
pub(crate) enum BuildError {
    #[error(transparent)]
    Bencode(#[from] BencodeDictError),
    #[error("not all metadata pieces fetched; {left} remaining")]
    NotFinished { left: usize },
    #[error("info hash mismatch: expected {expected}, got {got:x}")]
    Digest { expected: InfoHash, got: Bytes },
}

#[derive(Copy, Clone, Debug, Error, Eq, PartialEq)]
pub(crate) enum BencodeDictError {
    #[error("data is not valid bencode")]
    Syntax,
    #[error("data is not a bencode dict")]
    NotADict,
    #[error("data is empty")]
    Empty,
    #[error("data has trailing bytes after bencode dict")]
    Trailing,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_check_good_bencode_dict() {
        let buf = Bytes::from(b"d3:foo3:bar3:keyi42ee".as_slice());
        assert_eq!(check_bencode_dict(&buf), Ok(()));
    }

    #[test]
    fn test_check_invalid_bencode() {
        let buf = Bytes::from(b"d3:keyi42e8:no valuee".as_slice());
        assert_eq!(check_bencode_dict(&buf), Err(BencodeDictError::Syntax));
    }

    #[test]
    fn test_check_bencode_non_dict() {
        let buf = Bytes::from(b"l3:keyi42e8:no valuee".as_slice());
        assert_eq!(check_bencode_dict(&buf), Err(BencodeDictError::NotADict));
    }

    #[test]
    fn test_check_empty_bencode() {
        let buf = Bytes::new();
        assert_eq!(check_bencode_dict(&buf), Err(BencodeDictError::Empty));
    }

    #[test]
    fn test_check_trailing_bencode() {
        let buf = Bytes::from(b"d3:foo3:bar3:keyi42ee5:extra".as_slice());
        assert_eq!(check_bencode_dict(&buf), Err(BencodeDictError::Trailing));
    }

    #[test]
    fn test_torrent_info_builder() {
        let metadata_size = 40 << 10;
        let info_hash = "fd33560457eae4b165bc5e7f7de6f24db61e957e"
            .parse::<InfoHash>()
            .unwrap();
        let mut builder = TorrentInfoBuilder::new(info_hash, metadata_size).unwrap();
        assert_eq!(builder.next_piece(), Some(0));
        let mut piece0 = BytesMut::with_capacity(16 << 10);
        piece0.put(b"d4:name15:My Test Torrent11:xtra-filler40914:".as_slice());
        piece0.put_bytes(0, 16339);
        builder.push(piece0.freeze()).unwrap();
        assert_eq!(builder.next_piece(), Some(1));
        builder.push(BytesMut::zeroed(16 << 10).freeze()).unwrap();
        assert_eq!(builder.next_piece(), Some(2));
        let mut piece2 = BytesMut::with_capacity(8 << 10);
        piece2.put_bytes(0, (8 << 10) - 1);
        piece2.put_u8(b'e');
        builder.push(piece2.freeze()).unwrap();
        assert_eq!(builder.next_piece(), None);
        let info = builder.build().unwrap();
        assert_eq!(info.name(), Some(Cow::from("My Test Torrent")));
    }

    #[test]
    fn test_torrent_file_into_bytes() {
        let info = TorrentInfo {
            info_hash: "ddbf90f0d41c8f91a555192279845bc45e530ec9"
                .parse::<InfoHash>()
                .unwrap(),
            data: Bytes::from(b"d6:lengthi42e4:name8:blob.dat12:piece lengthi65535e6:pieces20:00000000000000000000e".as_slice()),
        };
        let torrent = TorrentFile {
            info,
            trackers: vec![
                "http://tracker.example.com:8080/announce"
                    .parse::<Tracker>()
                    .unwrap()
                    .into(),
                "udp://bits.example.net:9001"
                    .parse::<Tracker>()
                    .unwrap()
                    .into(),
            ],
            creation_date: 1686939764,
            created_by: "demagnetize vDEV".into(),
        };
        let buf = Bytes::from(torrent);
        assert_eq!(buf, b"d13:announce-listll40:http://tracker.example.com:8080/announceel27:udp://bits.example.net:9001ee10:created by16:demagnetize vDEV13:creation datei1686939764e4:infod6:lengthi42e4:name8:blob.dat12:piece lengthi65535e6:pieces20:00000000000000000000ee".as_slice());
        check_bencode_dict(&buf).unwrap();
    }
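
    // Illustrative sketch added for this writeup (not from the original test
    // suite): a piece whose size disagrees with the builder's expected piece
    // size (16 KiB for every piece but the last) is rejected with
    // `PushError::Length`.
    #[test]
    fn test_torrent_info_builder_wrong_piece_length() {
        let info_hash = "fd33560457eae4b165bc5e7f7de6f24db61e957e"
            .parse::<InfoHash>()
            .unwrap();
        let mut builder = TorrentInfoBuilder::new(info_hash, 40 << 10).unwrap();
        let e = builder.push(Bytes::from_static(&[0u8; 123])).unwrap_err();
        assert_eq!(
            e,
            PushError::Length {
                index: 0,
                expected: 16 << 10,
                got: 123,
            }
        );
    }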
"Torrent-{name}-{hash}.torrent" 471 | .parse::() 472 | .unwrap(); 473 | let info_hash = "ddbf90f0d41c8f91a555192279845bc45e530ec9" 474 | .parse::() 475 | .unwrap(); 476 | assert_eq!( 477 | template.format("My Test Torrent", info_hash), 478 | "Torrent-My Test Torrent-ddbf90f0d41c8f91a555192279845bc45e530ec9.torrent" 479 | ); 480 | } 481 | 482 | #[test] 483 | fn test_path_template_escaped_braces() { 484 | let template = "Torrent-{{{name}}}-{hash}.torrent" 485 | .parse::() 486 | .unwrap(); 487 | let info_hash = "ddbf90f0d41c8f91a555192279845bc45e530ec9" 488 | .parse::() 489 | .unwrap(); 490 | assert_eq!( 491 | template.format("My Test Torrent", info_hash), 492 | "Torrent-{My Test Torrent}-ddbf90f0d41c8f91a555192279845bc45e530ec9.torrent" 493 | ); 494 | } 495 | 496 | #[test] 497 | fn test_path_template_no_leading_or_trailing_literals() { 498 | let template = "{name}-{hash}".parse::().unwrap(); 499 | let info_hash = "ddbf90f0d41c8f91a555192279845bc45e530ec9" 500 | .parse::() 501 | .unwrap(); 502 | assert_eq!( 503 | template.format("My Test Torrent", info_hash), 504 | "My Test Torrent-ddbf90f0d41c8f91a555192279845bc45e530ec9" 505 | ); 506 | } 507 | 508 | #[test] 509 | fn test_path_template_unmatched() { 510 | let e = "torrent={name".parse::().unwrap_err(); 511 | assert_eq!(e, PathTemplateError::Unmatched(8)); 512 | assert_eq!(e.to_string(), "unmatched brace at byte index 8"); 513 | } 514 | 515 | #[test] 516 | fn test_path_template_nested_field() { 517 | let e = "{name{hash}torrent}".parse::().unwrap_err(); 518 | assert_eq!(e, PathTemplateError::InvalidField(0)); 519 | assert_eq!(e.to_string(), "malformed placeholder at byte index 0"); 520 | } 521 | 522 | #[test] 523 | fn test_path_template_invalid_field() { 524 | let e = "torrent={name+hash}".parse::().unwrap_err(); 525 | assert_eq!(e, PathTemplateError::InvalidField(8)); 526 | assert_eq!(e.to_string(), "malformed placeholder at byte index 8"); 527 | } 528 | 529 | #[test] 530 | fn test_path_template_unknown_field() { 531 | let e = "torrent={tracker}.torrent" 532 | .parse::() 533 | .unwrap_err(); 534 | assert_eq!(e, PathTemplateError::UnknownField("tracker".into())); 535 | assert_eq!(e.to_string(), "unknown placeholder \"tracker\""); 536 | } 537 | } 538 | -------------------------------------------------------------------------------- /src/tracker/http.rs: -------------------------------------------------------------------------------- 1 | use super::{AnnounceResponse, Announcement, TrackerError, TrackerUrlError}; 2 | use crate::peer::Peer; 3 | use crate::util::{decode_bencode, TryBytes, UnbencodeError}; 4 | use bendy::decoding::{Error as BendyError, FromBencode, Object, ResultExt}; 5 | use reqwest::Client; 6 | use std::fmt; 7 | use std::net::{SocketAddrV4, SocketAddrV6}; 8 | use thiserror::Error; 9 | use url::Url; 10 | 11 | static USER_AGENT: &str = concat!( 12 | env!("CARGO_PKG_NAME"), 13 | "/", 14 | env!("CARGO_PKG_VERSION"), 15 | " (", 16 | env!("CARGO_PKG_REPOSITORY"), 17 | ")", 18 | ); 19 | 20 | #[derive(Clone, Debug, Eq, PartialEq)] 21 | pub(crate) struct HttpTracker(Url); 22 | 23 | impl HttpTracker { 24 | pub(crate) fn url_string(&self) -> String { 25 | self.0.to_string() 26 | } 27 | 28 | pub(super) fn connect(&self) -> Result { 29 | let client = Client::builder() 30 | .user_agent(USER_AGENT) 31 | .build() 32 | .map_err(HttpTrackerError::BuildClient)?; 33 | Ok(HttpTrackerSession { 34 | tracker: self.clone(), 35 | client, 36 | }) 37 | } 38 | } 39 | 40 | impl fmt::Display for HttpTracker { 41 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "<{}>", self.0)
    }
}

impl TryFrom<Url> for HttpTracker {
    type Error = TrackerUrlError;

    fn try_from(url: Url) -> Result<HttpTracker, TrackerUrlError> {
        let sch = url.scheme();
        if sch != "http" && sch != "https" {
            return Err(TrackerUrlError::UnsupportedScheme(sch.into()));
        }
        if url.host().is_none() {
            return Err(TrackerUrlError::NoHost);
        }
        Ok(HttpTracker(url))
    }
}

pub(super) struct HttpTrackerSession {
    pub(super) tracker: HttpTracker,
    client: Client,
}

impl HttpTrackerSession {
    pub(super) async fn announce(
        &self,
        announcement: Announcement,
    ) -> Result<AnnounceResponse, TrackerError> {
        let mut url = self.tracker.0.clone();
        url.set_fragment(None);
        announcement.event.add_query_param(&mut url);
        announcement.info_hash.add_query_param(&mut url);
        announcement.peer_id.add_query_param(&mut url);
        announcement.crypto.add_query_param(&mut url);
        url.query_pairs_mut()
            .append_pair("port", &announcement.port.to_string())
            .append_pair("uploaded", &announcement.uploaded.to_string())
            .append_pair("downloaded", &announcement.downloaded.to_string())
            .append_pair("left", &announcement.left.to_string())
            .append_pair("numwant", &announcement.numwant.to_string())
            .append_pair("key", &announcement.key.to_string())
            .append_pair("compact", "1");
        let buf = self
            .client
            .get(url)
            .send()
            .await
            .map_err(HttpTrackerError::SendRequest)?
            .error_for_status()
            .map_err(HttpTrackerError::HttpStatus)?
            .bytes()
            .await
            .map_err(HttpTrackerError::ReadBody)?;
        decode_bencode::<HttpAnnounceResponse>(&buf)
            .map_err(HttpTrackerError::ParseResponse)?
            .result()
    }
}

#[derive(Clone, Debug, Eq, PartialEq)]
enum HttpAnnounceResponse {
    Success(AnnounceResponse),
    Failure(String),
}

impl HttpAnnounceResponse {
    fn result(self) -> Result<AnnounceResponse, TrackerError> {
        match self {
            HttpAnnounceResponse::Success(announcement) => Ok(announcement),
            HttpAnnounceResponse::Failure(msg) => Err(TrackerError::Failure(msg)),
        }
    }
}

impl FromBencode for HttpAnnounceResponse {
    fn decode_bencode_object(object: Object<'_, '_>) -> Result<Self, BendyError> {
        let mut interval = None;
        let mut peers = Vec::new();
        let mut warning_message = None;
        let mut min_interval = None;
        let mut tracker_id = None;
        let mut complete = None;
        let mut incomplete = None;
        let mut crypto_flags = None;
        let mut dd = object.try_into_dictionary()?;
        while let Some(kv) = dd.next_pair()? {
            match kv {
                (b"failure reason", v) => {
                    let reason =
                        String::from_utf8_lossy(v.try_into_bytes().context("failure reason")?)
                            .into_owned();
                    return Ok(HttpAnnounceResponse::Failure(reason));
                }
                (b"interval", v) => {
                    interval = Some(u32::decode_bencode_object(v).context("interval")?);
                }
                (b"crypto_flags", v) => {
                    crypto_flags = Some(
                        v.try_into_bytes()
                            .context("crypto_flags")?
                            .iter()
                            .map(|&b| b != 0)
                            .collect::<Vec<bool>>(),
                    );
                }
                (b"peers", v) => {
                    debug_assert!(
                        peers.is_empty(),
                        "peers should not be populated before reaching `peers` field of HTTP tracker response"
                    );
                    if matches!(v, Object::List(_)) {
                        // Original, non-compact format (BEP 3)
                        peers.extend(Vec::<Peer>::decode_bencode_object(v).context("peers")?);
                    } else {
                        // Compact format (BEP 23)
                        let buf = TryBytes::from(v.try_into_bytes().context("peers")?);
                        let addrs = match buf.try_get_all::<SocketAddrV4>() {
                            Ok(addrs) => addrs,
                            Err(e) => {
                                return Err(
                                    BendyError::malformed_content(Box::new(e)).context("peers")
                                );
                            }
                        };
                        peers.extend(addrs.into_iter().map(Peer::from));
                    }
                    if let Some(ref flags) = crypto_flags {
                        for (pr, &flag) in std::iter::zip(&mut peers, flags) {
                            if flag {
                                pr.requires_crypto = true;
                            }
                        }
                    }
                }
                (b"peers6", v) => {
                    // Compact format (BEP 7)
                    let buf = TryBytes::from(v.try_into_bytes().context("peers6")?);
                    let addrs = match buf.try_get_all::<SocketAddrV6>() {
                        Ok(addrs) => addrs,
                        Err(e) => {
                            return Err(
                                BendyError::malformed_content(Box::new(e)).context("peers6")
                            );
                        }
                    };
                    peers.extend(addrs.into_iter().map(Peer::from));
                }
                (b"warning message", v) => {
                    warning_message = Some(
                        String::from_utf8_lossy(v.try_into_bytes().context("warning message")?)
                            .into_owned(),
                    );
                }
                (b"min interval", v) => {
                    min_interval = Some(u32::decode_bencode_object(v).context("min interval")?);
                }
                (b"tracker id", v) => {
                    tracker_id = Some(bytes::Bytes::from(
                        v.try_into_bytes().context("tracker id")?.to_vec(),
                    ));
                }
                (b"complete", v) => {
                    complete = Some(u32::decode_bencode_object(v).context("complete")?);
                }
                (b"incomplete", v) => {
                    incomplete = Some(u32::decode_bencode_object(v).context("incomplete")?);
                }
                _ => (),
            }
        }
        let interval = interval.ok_or_else(|| BendyError::missing_field("interval"))?;
        Ok(HttpAnnounceResponse::Success(AnnounceResponse {
            interval,
            peers,
            warning_message,
            min_interval,
            tracker_id,
            complete,
            incomplete,
            leechers: None,
            seeders: None,
        }))
    }
}

#[derive(Debug, Error)]
pub(crate) enum HttpTrackerError {
    #[error("failed to build HTTP client")]
    BuildClient(#[source] reqwest::Error),
    #[error("failed to send request to HTTP tracker")]
    SendRequest(#[source] reqwest::Error),
    #[error("HTTP tracker responded with HTTP error")]
    HttpStatus(#[source] reqwest::Error),
    #[error("failed to read HTTP tracker response")]
    ReadBody(#[source] reqwest::Error),
    #[error("failed to parse HTTP tracker response")]
    ParseResponse(#[source] UnbencodeError),
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::PeerId;
    use bytes::{BufMut, BytesMut};
    use std::net::SocketAddr;

    #[test]
    fn test_decode_response() {
        let mut buf = BytesMut::new();
        buf.put(b"d8:completei47e10:incompletei5e8:intervali1800e12:min inter".as_slice());
        buf.put(b"vali1800e5:peers300:w\x94bls\xdf\xd8\xb4C,\x1a\xe1\xba\x16".as_slice());
        buf.put(b"\xdf\xe8\x0f0\xc1(\r\xab\xc8\xd5\xb32\xe9\xec\x86~\xd4UX]A".as_slice());
buf.put(b"\xf1-\x0e\xc30\xd6\xa1g\xd9\xe8z\xcbv\xbfe\xaen\xdc\xb41%/G".as_slice()); 253 | buf.put(b"\xa1\xa7N\xc5i\x88A\xf1Bs\x93\xd1\x00\x01\x9a\x1d\x83\xb6".as_slice()); 254 | buf.put(b"\xc8\xd5\xbc\xd18-N\\-[\x17\x85\xc8\xd5\xb9U\x96>ZIH\x15".as_slice()); 255 | buf.put(b"\x11\x05\xebT\xc2%`S\x83\xb0\\V\x90\x04SY\xb3\xec\xac\x06".as_slice()); 256 | buf.put(b"\xaa[Wns\x08.\xc9\xc8o\xe0\xf5\x94\xe8R\xaa\xe86\x1a\xe1".as_slice()); 257 | buf.put(b"\x1b!nb\xc4\x85Mn\x83*\xa15\x17\x13\x8dp\xb3\xe9\xc1(\r\xa2".as_slice()); 258 | buf.put(b"\xb3\x8c\xc3\xf6xC-\x15\xb9\x95Z\t\xc7I-\x0c\xdct\xc8\xd5E".as_slice()); 259 | buf.put(b"\xaaM\xe3\x10\x9f\xd5\xf5\xb3;z\xed\xd9\x8a\xd5\x1d\xa1\x0b".as_slice()); 260 | buf.put(b"\xd4f9K!\x9b\xb9\x99\xb3\x1b\xe6'gW\xd6\xdeu\xcfeXO\xc1\xc8".as_slice()); 261 | buf.put(b"-\xd4:x<.\xa6\x18%a\x1a\xc8\xd5\xc1 \x7f\x98\xc8\xd5\xd9".as_slice()); 262 | buf.put(b"\x8a\xc2^\xe0\x02m\xc9\x98\xa6\x00\x01\xb9\xba\xf9\t\x1a".as_slice()); 263 | buf.put(b"\xe1\x86\x13\xbc[\xce\x9bD\xeb,G\xd9|.5\xfd\xf1\xd4\xf1\x1b".as_slice()); 264 | buf.put(b"\"\x14I\xa4\xbe\xb0)\x1b\xedp\xcb\xac\xf1\xe0.I\x84m\xc9".as_slice()); 265 | buf.put(b"\x98\xaf\x00\x01e".as_slice()); 266 | let res = decode_bencode::(&buf).unwrap(); 267 | let HttpAnnounceResponse::Success(announcement) = res else { 268 | panic!("Announcement failed"); 269 | }; 270 | assert_eq!( 271 | announcement, 272 | AnnounceResponse { 273 | interval: 1800, 274 | peers: vec![ 275 | "119.148.98.108:29663".parse::().unwrap(), 276 | "216.180.67.44:6881".parse::().unwrap(), 277 | "186.22.223.232:3888".parse::().unwrap(), 278 | "193.40.13.171:51413".parse::().unwrap(), 279 | "179.50.233.236:34430".parse::().unwrap(), 280 | "212.85.88.93:16881".parse::().unwrap(), 281 | "45.14.195.48:54945".parse::().unwrap(), 282 | "103.217.232.122:52086".parse::().unwrap(), 283 | "191.101.174.110:56500".parse::().unwrap(), 284 | "49.37.47.71:41383".parse::().unwrap(), 285 | "78.197.105.136:16881".parse::().unwrap(), 286 | "66.115.147.209:1".parse::().unwrap(), 287 | "154.29.131.182:51413".parse::().unwrap(), 288 | "188.209.56.45:20060".parse::().unwrap(), 289 | "45.91.23.133:51413".parse::().unwrap(), 290 | "185.85.150.62:23113".parse::().unwrap(), 291 | "72.21.17.5:60244".parse::().unwrap(), 292 | "194.37.96.83:33712".parse::().unwrap(), 293 | "92.86.144.4:21337".parse::().unwrap(), 294 | "179.236.172.6:43611".parse::().unwrap(), 295 | "87.110.115.8:11977".parse::().unwrap(), 296 | "200.111.224.245:38120".parse::().unwrap(), 297 | "82.170.232.54:6881".parse::().unwrap(), 298 | "27.33.110.98:50309".parse::().unwrap(), 299 | "77.110.131.42:41269".parse::().unwrap(), 300 | "23.19.141.112:46057".parse::().unwrap(), 301 | "193.40.13.162:45964".parse::().unwrap(), 302 | "195.246.120.67:11541".parse::().unwrap(), 303 | "185.149.90.9:51017".parse::().unwrap(), 304 | "45.12.220.116:51413".parse::().unwrap(), 305 | "69.170.77.227:4255".parse::().unwrap(), 306 | "213.245.179.59:31469".parse::().unwrap(), 307 | "217.138.213.29:41227".parse::().unwrap(), 308 | "212.102.57.75:8603".parse::().unwrap(), 309 | "185.153.179.27:58919".parse::().unwrap(), 310 | "103.87.214.222:30159".parse::().unwrap(), 311 | "101.88.79.193:51245".parse::().unwrap(), 312 | "212.58.120.60:11942".parse::().unwrap(), 313 | "24.37.97.26:51413".parse::().unwrap(), 314 | "193.32.127.152:51413".parse::().unwrap(), 315 | "217.138.194.94:57346".parse::().unwrap(), 316 | "109.201.152.166:1".parse::().unwrap(), 317 | "185.186.249.9:6881".parse::().unwrap(), 318 | 
"134.19.188.91:52891".parse::().unwrap(), 319 | "68.235.44.71:55676".parse::().unwrap(), 320 | "46.53.253.241:54513".parse::().unwrap(), 321 | "27.34.20.73:42174".parse::().unwrap(), 322 | "176.41.27.237:28875".parse::().unwrap(), 323 | "172.241.224.46:18820".parse::().unwrap(), 324 | "109.201.152.175:1".parse::().unwrap(), 325 | ], 326 | warning_message: None, 327 | min_interval: Some(1800), 328 | tracker_id: None, 329 | complete: Some(47), 330 | incomplete: Some(5), 331 | leechers: None, 332 | seeders: None, 333 | } 334 | ); 335 | } 336 | 337 | #[test] 338 | fn test_decode_response_with_peers6() { 339 | let res = decode_bencode::( 340 | b"d8:intervali1800e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe", 341 | ) 342 | .unwrap(); 343 | let HttpAnnounceResponse::Success(announcement) = res else { 344 | panic!("Announcement failed"); 345 | }; 346 | assert_eq!( 347 | announcement, 348 | AnnounceResponse { 349 | interval: 1800, 350 | peers: vec![ 351 | "105.105.105.105:28784".parse::().unwrap(), 352 | "[6969:6969:6969:6969:6969:6969:6969:6969]:28784" 353 | .parse::() 354 | .unwrap(), 355 | ], 356 | warning_message: None, 357 | min_interval: None, 358 | tracker_id: None, 359 | complete: None, 360 | incomplete: None, 361 | leechers: None, 362 | seeders: None, 363 | } 364 | ); 365 | } 366 | 367 | #[test] 368 | fn test_decode_response_bad_peers6() { 369 | let e = decode_bencode::( 370 | b"d8:completei45e10:downloadedi8384e10:incompletei4e8:intervali900e12:min intervali300e6:peers66:\x00\x00\x00\x00\x00\x0010:tracker id7:AniRenae" 371 | ).unwrap_err(); 372 | let UnbencodeError::Bendy(e) = e else { 373 | panic!("Error was not raised within FromBencode"); 374 | }; 375 | assert_eq!( 376 | e.to_string(), 377 | "Error: malformed content discovered: unexpected end of packet in peers6" 378 | ); 379 | } 380 | 381 | #[test] 382 | fn test_decode_failure_response() { 383 | let res = decode_bencode::(b"d14:failure reason14:too much stuffe") 384 | .unwrap(); 385 | assert_eq!(res, HttpAnnounceResponse::Failure("too much stuff".into())); 386 | } 387 | 388 | #[test] 389 | fn test_decode_noncompact_response() { 390 | let res = decode_bencode::( 391 | b"d8:completei431e10:incompletei14e8:intervali1800e5:peersld2:ip22:2001:41d0:1004:20b5::17:peer id20:-TR3000-23xhfykztwo84:porti51413eed2:ip18:2001:41d0:e:907::17:peer id20:-lt0D80-\xf8\x01\x92N+!{\x06\xcc\x15\xf0\xc44:porti12179eed2:ip14:185.125.190.597:peer id20:T03I--00N4b1YqQdAWh44:porti6892eed2:ip19:2403:5812:a03e::2227:peer id20:-TR3000-83e2ltycmh6c4:porti51413eed2:ip37:2003:f1:6f0f:dd00:c0ab:7cff:febd:274a7:peer id20:-TR3000-9e0zt0knchh44:porti51413eeee" 392 | ).unwrap(); 393 | let HttpAnnounceResponse::Success(announcement) = res else { 394 | panic!("Announcement failed"); 395 | }; 396 | assert_eq!( 397 | announcement, 398 | AnnounceResponse { 399 | interval: 1800, 400 | peers: vec![ 401 | Peer { 402 | address: "[2001:41d0:1004:20b5::1]:51413" 403 | .parse::() 404 | .unwrap(), 405 | id: Some(PeerId::from(b"-TR3000-23xhfykztwo8")), 406 | requires_crypto: false, 407 | }, 408 | Peer { 409 | address: "[2001:41d0:e:907::1]:12179".parse::().unwrap(), 410 | id: Some(PeerId::from( 411 | b"-lt0D80-\xf8\x01\x92N+!{\x06\xcc\x15\xf0\xc4" 412 | )), 413 | requires_crypto: false, 414 | }, 415 | Peer { 416 | address: "185.125.190.59:6892".parse::().unwrap(), 417 | id: Some(PeerId::from(b"T03I--00N4b1YqQdAWh4")), 418 | requires_crypto: false, 419 | }, 420 | Peer { 421 | address: "[2403:5812:a03e::222]:51413".parse::().unwrap(), 422 | id: 
Some(PeerId::from(b"-TR3000-83e2ltycmh6c")), 423 | requires_crypto: false, 424 | }, 425 | Peer { 426 | address: "[2003:f1:6f0f:dd00:c0ab:7cff:febd:274a]:51413" 427 | .parse::() 428 | .unwrap(), 429 | id: Some(PeerId::from(b"-TR3000-9e0zt0knchh4")), 430 | requires_crypto: false, 431 | }, 432 | ], 433 | warning_message: None, 434 | min_interval: None, 435 | tracker_id: None, 436 | complete: Some(431), 437 | incomplete: Some(14), 438 | leechers: None, 439 | seeders: None, 440 | } 441 | ); 442 | } 443 | 444 | #[test] 445 | fn test_decode_noncompact_response_no_peer_id() { 446 | let res = decode_bencode::( 447 | b"d8:intervali900e5:peersld2:ip13:62.11.247.2494:porti8012eed2:ip13:185.148.1.1584:porti24810eed2:ip14:108.51.168.1554:porti51413eed2:ip14:207.179.235.144:porti49192eeee" 448 | ).unwrap(); 449 | let HttpAnnounceResponse::Success(announcement) = res else { 450 | panic!("Announcement failed"); 451 | }; 452 | assert_eq!( 453 | announcement, 454 | AnnounceResponse { 455 | interval: 900, 456 | peers: vec![ 457 | "62.11.247.249:8012".parse::().unwrap(), 458 | "185.148.1.158:24810".parse::().unwrap(), 459 | "108.51.168.155:51413".parse::().unwrap(), 460 | "207.179.235.14:49192".parse::().unwrap(), 461 | ], 462 | warning_message: None, 463 | min_interval: None, 464 | tracker_id: None, 465 | complete: None, 466 | incomplete: None, 467 | leechers: None, 468 | seeders: None, 469 | } 470 | ); 471 | } 472 | 473 | #[test] 474 | fn test_decode_response_with_crypto_flags() { 475 | let mut buf = BytesMut::new(); 476 | buf.put(b"d8:completei47e12:crypto_flags5:\x00\x01\x01\x00\x0110:in".as_slice()); 477 | buf.put(b"completei5e8:intervali1800e12:min intervali1800e5:peers30".as_slice()); 478 | buf.put(b":w\x94bls\xdf\xd8\xb4C,\x1a\xe1\xba\x16\xdf\xe8\x0f0\xc1(".as_slice()); 479 | buf.put(b"\r\xab\xc8\xd5\xb32\xe9\xec\x86~e".as_slice()); 480 | let res = decode_bencode::(&buf).unwrap(); 481 | let HttpAnnounceResponse::Success(announcement) = res else { 482 | panic!("Announcement failed"); 483 | }; 484 | assert_eq!( 485 | announcement, 486 | AnnounceResponse { 487 | interval: 1800, 488 | peers: vec![ 489 | Peer { 490 | address: "119.148.98.108:29663".parse::().unwrap(), 491 | id: None, 492 | requires_crypto: false, 493 | }, 494 | Peer { 495 | address: "216.180.67.44:6881".parse::().unwrap(), 496 | id: None, 497 | requires_crypto: true, 498 | }, 499 | Peer { 500 | address: "186.22.223.232:3888".parse::().unwrap(), 501 | id: None, 502 | requires_crypto: true, 503 | }, 504 | Peer { 505 | address: "193.40.13.171:51413".parse::().unwrap(), 506 | id: None, 507 | requires_crypto: false, 508 | }, 509 | Peer { 510 | address: "179.50.233.236:34430".parse::().unwrap(), 511 | id: None, 512 | requires_crypto: true, 513 | }, 514 | ], 515 | warning_message: None, 516 | min_interval: Some(1800), 517 | tracker_id: None, 518 | complete: Some(47), 519 | incomplete: Some(5), 520 | leechers: None, 521 | seeders: None, 522 | } 523 | ); 524 | } 525 | } 526 | -------------------------------------------------------------------------------- /src/tracker/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod http; 2 | pub(crate) mod udp; 3 | use self::http::*; 4 | use self::udp::*; 5 | use crate::app::App; 6 | use crate::consts::LEFT; 7 | use crate::peer::Peer; 8 | use crate::types::{InfoHash, Key, PeerId}; 9 | use crate::util::{comma_list, ErrorChain}; 10 | use bytes::Bytes; 11 | use std::fmt; 12 | use std::str::FromStr; 13 | use std::sync::Arc; 14 | use thiserror::Error; 15 | use 
use tokio::time::timeout;
use url::Url;

#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) enum Tracker {
    Http(HttpTracker),
    Udp(UdpTracker),
}

impl Tracker {
    pub(crate) fn url_string(&self) -> String {
        match self {
            Tracker::Http(tr) => tr.url_string(),
            Tracker::Udp(tr) => tr.url_string(),
        }
    }

    pub(crate) fn peer_getter(&self, info_hash: InfoHash, app: Arc<App>) -> PeerGetter<'_> {
        PeerGetter {
            tracker: self,
            info_hash,
            app,
            tracker_crypto: None,
        }
    }
}

#[derive(Clone, Debug)]
pub(crate) struct PeerGetter<'a> {
    tracker: &'a Tracker,
    info_hash: InfoHash,
    app: Arc<App>,
    tracker_crypto: Option<TrackerCrypto>,
}

impl PeerGetter<'_> {
    pub(crate) fn tracker_crypto(mut self, tc: Option<TrackerCrypto>) -> Self {
        self.tracker_crypto = tc;
        self
    }

    pub(crate) async fn run(self) -> Result<Vec<Peer>, TrackerError> {
        log::info!(
            "Requesting peers for {} from {}",
            self.info_hash,
            self.tracker
        );
        timeout(
            self.app.cfg.trackers.announce_timeout,
            self.inner_get_peers(),
        )
        .await
        .unwrap_or(Err(TrackerError::Timeout))
    }

    async fn inner_get_peers(self) -> Result<Vec<Peer>, TrackerError> {
        let mut s = self.connect().await?;
        let peers = s.start().await?.peers;
        let display = self.tracker.to_string();
        let info_hash = self.info_hash;
        log::info!("{display} returned {} peers for {info_hash}", peers.len());
        log::debug!(
            "{display} returned peers for {info_hash}: {}",
            comma_list(&peers)
        );
        self.app.shutdown_group.spawn(|token| async move {
            tokio::select! {
                () = token.cancelled() => log::trace!(r#""stopped" announcement to {display} for {info_hash} cancelled"#),
                r = s.stop() => {
                    if let Err(e) = r {
                        log::warn!(
                            r#"failure sending "stopped" announcement to {display} for {info_hash}: {}"#,
                            ErrorChain(e)
                        );
                    }
                }
            }
        });
        Ok(peers)
    }

    async fn connect(&self) -> Result<TrackerSession, TrackerError> {
        let inner = match self.tracker {
            Tracker::Http(t) => InnerTrackerSession::Http(t.connect()?),
            Tracker::Udp(t) => InnerTrackerSession::Udp(t.connect().await?),
        };
        Ok(TrackerSession {
            inner,
            info_hash: self.info_hash,
            app: Arc::clone(&self.app),
            tracker_crypto: self.tracker_crypto,
        })
    }
}

impl fmt::Display for Tracker {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Tracker::Http(http) => write!(f, "{http}"),
            Tracker::Udp(udp) => write!(f, "{udp}"),
        }
    }
}

impl FromStr for Tracker {
    type Err = TrackerUrlError;

    fn from_str(s: &str) -> Result<Tracker, TrackerUrlError> {
        let url = Url::parse(s)?;
        match url.scheme() {
            "http" | "https" => Ok(Tracker::Http(HttpTracker::try_from(url)?)),
            "udp" => Ok(Tracker::Udp(UdpTracker::try_from(url)?)),
            sch => Err(TrackerUrlError::UnsupportedScheme(sch.into())),
        }
    }
}

struct TrackerSession {
    inner: InnerTrackerSession,
    info_hash: InfoHash,
    app: Arc<App>,
    tracker_crypto: Option<TrackerCrypto>,
}

enum InnerTrackerSession {
    Http(HttpTrackerSession),
    Udp(UdpTrackerSession),
}

impl TrackerSession {
    fn get_tracker_crypto(&self) -> TrackerCrypto {
        self.tracker_crypto
            .unwrap_or_else(|| self.app.cfg.general.encrypt.get_tracker_crypto())
    }

    fn tracker_display(&self) -> String {
        match &self.inner {
            InnerTrackerSession::Http(s) => s.tracker.to_string(),
            InnerTrackerSession::Udp(s) => s.tracker.to_string(),
        }
    }

    async fn start(&mut self) -> Result<AnnounceResponse, TrackerError> {
        log::trace!(
            r#"Sending "started" announcement to {} for {}"#,
            self.tracker_display(),
            self.info_hash
        );
        self.announce(Announcement {
            info_hash: self.info_hash,
            peer_id: self.app.local.id,
            downloaded: 0,
            left: LEFT,
            uploaded: 0,
            event: AnnounceEvent::Started,
            key: self.app.local.key,
            numwant: self.app.cfg.trackers.numwant.get(),
            port: self.app.local.port,
            crypto: self.get_tracker_crypto(),
        })
        .await
    }

    async fn stop(&mut self) -> Result<AnnounceResponse, TrackerError> {
        log::trace!(
            r#"Sending "stopped" announcement to {} for {}"#,
            self.tracker_display(),
            self.info_hash
        );
        self.announce(Announcement {
            info_hash: self.info_hash,
            peer_id: self.app.local.id,
            downloaded: 0,
            left: LEFT,
            uploaded: 0,
            event: AnnounceEvent::Stopped,
            key: self.app.local.key,
            numwant: self.app.cfg.trackers.numwant.get(),
            port: self.app.local.port,
            crypto: self.get_tracker_crypto(),
        })
        .await
    }

    async fn announce(
        &mut self,
        announcement: Announcement,
    ) -> Result<AnnounceResponse, TrackerError> {
        let announcement = match &mut self.inner {
            InnerTrackerSession::Http(s) => s.announce(announcement).await?,
            InnerTrackerSession::Udp(s) => s.announce(announcement).await?,
        };
        if let Some(msg) = announcement.warning_message.as_ref() {
            log::trace!(
                "{} replied with warning in response to {} announcement: {:?}",
                self.tracker_display(),
                self.info_hash,
                msg,
            );
        }
        Ok(announcement)
    }
}

#[derive(Clone, Debug, Error, Eq, PartialEq)]
pub(crate) enum TrackerUrlError {
    #[error("invalid tracker URL")]
    Url(#[from] url::ParseError),
    #[error("unsupported tracker URL scheme: {0:?}")]
    UnsupportedScheme(String),
    #[error("no host in tracker URL")]
    NoHost,
    #[error("no port in UDP tracker URL")]
    NoUdpPort,
}

#[derive(Debug, Error)]
pub(crate) enum TrackerError {
    #[error("interactions with tracker did not complete in time")]
    Timeout,
    #[error("tracker replied with error message {0:?}")]
    Failure(String),
    #[error(transparent)]
    Http(#[from] HttpTrackerError),
    #[error(transparent)]
    Udp(#[from] UdpTrackerError),
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum AnnounceEvent {
    #[allow(dead_code)]
    Announce,
    #[allow(dead_code)]
    Completed,
    Started,
    Stopped,
}

impl AnnounceEvent {
    fn add_query_param(&self, url: &mut Url) {
        let value = match self {
            AnnounceEvent::Announce => return,
            AnnounceEvent::Completed => "completed",
            AnnounceEvent::Started => "started",
            AnnounceEvent::Stopped => "stopped",
        };
        url.query_pairs_mut().append_pair("event", value);
    }

    fn for_udp(&self) -> u32 {
        match self {
            AnnounceEvent::Announce => 0,
            AnnounceEvent::Completed => 1,
            AnnounceEvent::Started => 2,
            AnnounceEvent::Stopped => 3,
        }
    }
}

#[derive(Clone, Debug, Eq, PartialEq)]
struct Announcement {
    info_hash: InfoHash,
    peer_id: PeerId,
    downloaded: u64,
    left: u64,
    uploaded: u64,
    event: AnnounceEvent,
    key: Key,
    numwant: u32,
    port: u16,
    crypto: TrackerCrypto,
}

#[derive(Clone, Debug, Eq, PartialEq)]
struct AnnounceResponse {
    interval: u32,
    peers: Vec<Peer>,
    warning_message: Option<String>,
    min_interval: Option<u32>,
    tracker_id: Option<Bytes>,
    complete: Option<u32>,
    incomplete: Option<u32>,
    leechers: Option<u32>,
    seeders: Option<u32>,
}

/// Possible levels of encryption support to include in tracker announcements;
/// only used by HTTP trackers
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub(crate) enum TrackerCrypto {
    Required,
    #[default]
    Supported,
    Plain,
}

impl TrackerCrypto {
    fn add_query_param(&self, url: &mut Url) {
        match self {
            TrackerCrypto::Required => {
                url.query_pairs_mut().append_pair("requirecrypto", "1");
            }
            TrackerCrypto::Supported => {
                url.query_pairs_mut().append_pair("supportcrypto", "1");
            }
            TrackerCrypto::Plain => (),
        }
    }
}
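
#[cfg(test)]
mod tests {
    use super::*;

    // Illustrative sketch added for this writeup (not from the original test
    // suite): the `crypto` field of an announcement is encoded for HTTP
    // trackers as a `requirecrypto=1` or `supportcrypto=1` query parameter,
    // while `Plain` adds nothing.
    #[test]
    fn test_tracker_crypto_add_query_param() {
        let base = "http://tracker.example.com/announce";
        let mut url = Url::parse(base).unwrap();
        TrackerCrypto::Required.add_query_param(&mut url);
        assert_eq!(url.as_str(), format!("{base}?requirecrypto=1"));
        let mut url = Url::parse(base).unwrap();
        TrackerCrypto::Supported.add_query_param(&mut url);
        assert_eq!(url.as_str(), format!("{base}?supportcrypto=1"));
        let mut url = Url::parse(base).unwrap();
        TrackerCrypto::Plain.add_query_param(&mut url);
        assert_eq!(url.as_str(), base);
    }

    // Illustrative sketch: the numeric event codes sent in UDP announces
    // (BEP 15).
    #[test]
    fn test_announce_event_for_udp() {
        assert_eq!(AnnounceEvent::Announce.for_udp(), 0);
        assert_eq!(AnnounceEvent::Completed.for_udp(), 1);
        assert_eq!(AnnounceEvent::Started.for_udp(), 2);
        assert_eq!(AnnounceEvent::Stopped.for_udp(), 3);
    }
}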
--------------------------------------------------------------------------------
/src/types.rs:
--------------------------------------------------------------------------------
use crate::util::{PacketError, TryFromBuf};
use bytes::{Buf, Bytes};
use data_encoding::{DecodeError, BASE32, HEXLOWER_PERMISSIVE};
use rand::{
    distr::{Alphanumeric, Distribution, StandardUniform},
    Rng,
};
use std::borrow::Cow;
use std::fmt;
use std::str::FromStr;
use thiserror::Error;
use url::Url;

#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub(crate) struct InfoHash([u8; InfoHash::LENGTH]);

impl InfoHash {
    pub(crate) const LENGTH: usize = 20;

    pub(crate) fn from_hex(s: &str) -> Result<InfoHash, InfoHashError> {
        HEXLOWER_PERMISSIVE
            .decode(s.as_bytes())
            .map_err(InfoHashError::InvalidHex)?
            .try_into()
    }

    pub(crate) fn from_base32(s: &str) -> Result<InfoHash, InfoHashError> {
        BASE32
            .decode(s.as_bytes())
            .map_err(InfoHashError::InvalidBase32)?
            .try_into()
    }

    pub(crate) fn as_bytes(&self) -> &[u8] {
        self.0.as_slice()
    }

    pub(crate) fn add_query_param(&self, url: &mut Url) {
        add_bytes_query_param(url, "info_hash", &self.0);
    }
}

impl fmt::Display for InfoHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for b in self.0 {
            write!(f, "{b:02x}")?;
        }
        Ok(())
    }
}

impl FromStr for InfoHash {
    type Err = InfoHashError;

    fn from_str(s: &str) -> Result<InfoHash, InfoHashError> {
        if s.len() == 32 {
            InfoHash::from_base32(s)
        } else {
            InfoHash::from_hex(s)
        }
    }
}

impl TryFrom<Vec<u8>> for InfoHash {
    type Error = InfoHashError;

    fn try_from(bs: Vec<u8>) -> Result<InfoHash, InfoHashError> {
        match bs.try_into() {
            Ok(barray) => Ok(InfoHash(barray)),
            Err(bs) => Err(InfoHashError::InvalidLength(bs.len())),
        }
    }
}

impl TryFromBuf for InfoHash {
    fn try_from_buf(buf: &mut Bytes) -> Result<InfoHash, PacketError> {
        if buf.len() >= InfoHash::LENGTH {
            let mut data = [0u8; InfoHash::LENGTH];
            buf.copy_to_slice(&mut data);
            Ok(InfoHash(data))
        } else {
            Err(PacketError::Short)
        }
    }
}

#[derive(Copy, Clone, Debug, Eq, Error, PartialEq)]
pub(crate) enum InfoHashError {
    #[error("info hash is invalid hexadecimal")]
    InvalidHex(#[source] DecodeError),
    #[error("info hash is invalid base32")]
    InvalidBase32(#[source] DecodeError),
    #[error("info hash is {0} bytes long, expected 20")]
    InvalidLength(usize),
}

#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub(crate) struct PeerId([u8; PeerId::LENGTH]);

impl PeerId {
    const LENGTH: usize = 20;

    pub(crate) fn generate<R: Rng>(prefix: &str, rng: &mut R) -> PeerId {
        let bs = prefix.as_bytes();
        PeerId(std::array::from_fn(|i| {
            bs.get(i)
                .copied()
                .unwrap_or_else(|| Alphanumeric.sample(rng))
        }))
    }

    pub(crate) fn as_bytes(&self) -> &[u8] {
        self.0.as_slice()
    }

    pub(crate) fn add_query_param(&self, url: &mut Url) {
        add_bytes_query_param(url, "peer_id", &self.0);
    }
}

impl fmt::Display for PeerId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", Bytes::from(self.0.to_vec()))
    }
}

impl From<&[u8; 20]> for PeerId {
    fn from(bs: &[u8; 20]) -> PeerId {
        PeerId(*bs)
    }
}

impl TryFrom<&[u8]> for PeerId {
    type Error = PeerIdError;

    fn try_from(bs: &[u8]) -> Result<PeerId, PeerIdError> {
        match bs.try_into() {
            Ok(barray) => Ok(PeerId(barray)),
            Err(_) => Err(PeerIdError(bs.len())),
        }
    }
}

impl TryFrom<Vec<u8>> for PeerId {
    type Error = PeerIdError;

    fn try_from(bs: Vec<u8>) -> Result<PeerId, PeerIdError> {
        match bs.try_into() {
            Ok(barray) => Ok(PeerId(barray)),
            Err(bs) => Err(PeerIdError(bs.len())),
        }
    }
}

impl TryFromBuf for PeerId {
    fn try_from_buf(buf: &mut Bytes) -> Result<PeerId, PacketError> {
        if buf.len() >= PeerId::LENGTH {
            let mut data = [0u8; PeerId::LENGTH];
            buf.copy_to_slice(&mut data);
            Ok(PeerId(data))
        } else {
            Err(PacketError::Short)
        }
    }
}

#[derive(Copy, Clone, Debug, Error, Eq, PartialEq)]
#[error(
    "invalid length for peer id: expected {len} bytes, got {0}",
    len = PeerId::LENGTH
)]
pub(crate) struct PeerIdError(usize);

/// Key used by client to identify itself to a tracker across requests
///
/// Generate a random Key with `rng.random::<Key>()`.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) struct Key(u32);

impl fmt::Display for Key {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<u32> for Key {
    fn from(key: u32) -> Key {
        Key(key)
    }
}

impl From<Key> for u32 {
    fn from(key: Key) -> u32 {
        key.0
    }
}

impl Distribution<Key> for StandardUniform {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Key {
        Key(StandardUniform.sample(rng))
    }
}

fn add_bytes_query_param(url: &mut Url, key: &str, value: &[u8]) {
    static SENTINEL: &str = "ADD_BYTES_QUERY_PARAM";
    url.query_pairs_mut()
        .encoding_override(Some(&|s| {
            if s == SENTINEL {
                Cow::from(value.to_vec())
            } else {
                Cow::from(s.as_bytes())
            }
        }))
        .append_pair(key, SENTINEL)
        .encoding_override(None);
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_hex_info_hash() {
        let info_hash = "28C55196F57753C40ACEB6FB58617E6995A7EDDB"
            .parse::<InfoHash>()
            .unwrap();
        assert_eq!(
            info_hash.as_bytes(),
            b"\x28\xC5\x51\x96\xF5\x77\x53\xC4\x0A\xCE\xB6\xFB\x58\x61\x7E\x69\x95\xA7\xED\xDB"
        );
        assert_eq!(
            info_hash.to_string(),
            "28c55196f57753c40aceb6fb58617e6995a7eddb"
        );
    }

    #[test]
    fn test_base32_info_hash() {
        let info_hash = "XBIUOS3U6ZONDH4YDRZDLEHD4UQCIK4X"
            .parse::<InfoHash>()
            .unwrap();
        assert_eq!(
            info_hash.as_bytes(),
            b"\xb8\x51\x47\x4b\x74\xf6\x5c\xd1\x9f\x98\x1c\x72\x35\x90\xe3\xe5\x20\x24\x2b\x97",
        );
        assert_eq!(
            info_hash.to_string(),
            "b851474b74f65cd19f981c723590e3e520242b97"
        );
    }

    #[test]
    fn test_add_query_param() {
        let info_hash = "28C55196F57753C40ACEB6FB58617E6995A7EDDB"
            .parse::<InfoHash>()
            .unwrap();
        let mut url = Url::parse("http://tracker.example.com:8080/announce?here=there").unwrap();
        info_hash.add_query_param(&mut url);
        assert_eq!(url.as_str(), "http://tracker.example.com:8080/announce?here=there&info_hash=%28%C5Q%96%F5wS%C4%0A%CE%B6%FBXa%7Ei%95%A7%ED%DB");
    }

    #[test]
    fn test_generate_peer_id() {
        let peer_id = PeerId::generate("-PRE-123-", &mut rand::rng());
        assert_eq!(peer_id.as_bytes().len(), 20);
        let s = std::str::from_utf8(peer_id.as_bytes()).unwrap();
        let suffix = s.strip_prefix("-PRE-123-").unwrap();
        for ch in suffix.chars() {
            assert!(ch.is_ascii_alphanumeric());
        }
        assert_eq!(peer_id.to_string(), format!("b{s:?}"));
    }

    #[test]
    fn test_generate_peer_id_long_prefix() {
        let peer_id = PeerId::generate("-PRE-123-abcdefghijé-", &mut rand::rng());
        assert_eq!(peer_id.as_bytes(), b"-PRE-123-abcdefghij\xC3");
        assert_eq!(peer_id.to_string(), "b\"-PRE-123-abcdefghij\\xc3\"");
    }
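
    // Illustrative sketch added for this writeup (not from the original test
    // suite): a hex string that decodes to fewer than 20 bytes is rejected
    // with `InfoHashError::InvalidLength`.
    #[test]
    fn test_info_hash_invalid_length() {
        let e = "28c55196".parse::<InfoHash>().unwrap_err();
        assert_eq!(e, InfoHashError::InvalidLength(4));
    }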
13 | #[derive(Copy, Clone, Debug, Eq, PartialEq)]
14 | pub(crate) struct CommaList<'a, T>(&'a [T]);
15 |
16 | impl<T: fmt::Display> fmt::Display for CommaList<'_, T> {
17 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
18 |         let mut first = true;
19 |         for val in self.0 {
20 |             if !std::mem::replace(&mut first, false) {
21 |                 write!(f, ", ")?;
22 |             }
23 |             write!(f, "{val}")?;
24 |         }
25 |         if first {
26 |             write!(f, "<none>")?;
27 |         }
28 |         Ok(())
29 |     }
30 | }
31 |
32 | #[derive(Clone, Debug, Eq, PartialEq)]
33 | pub(crate) struct TryBytes(Bytes);
34 |
35 | impl TryBytes {
36 |     pub(crate) fn try_get<T: TryFromBuf>(&mut self) -> Result<T, PacketError> {
37 |         T::try_from_buf(&mut self.0)
38 |     }
39 |
40 |     pub(crate) fn try_get_all<T: TryFromBuf>(mut self) -> Result<Vec<T>, PacketError> {
41 |         let mut values = Vec::new();
42 |         while self.0.has_remaining() {
43 |             values.push(self.try_get()?);
44 |         }
45 |         Ok(values)
46 |     }
47 |
48 |     pub(crate) fn try_get_bytes(&mut self, len: usize) -> Result<Bytes, PacketError> {
49 |         if self.0.len() >= len {
50 |             Ok(self.0.copy_to_bytes(len))
51 |         } else {
52 |             Err(PacketError::Short)
53 |         }
54 |     }
55 |
56 |     pub(crate) fn remainder(self) -> Bytes {
57 |         self.0
58 |     }
59 |
60 |     pub(crate) fn eof(self) -> Result<(), PacketError> {
61 |         if self.0.has_remaining() {
62 |             Err(PacketError::Long)
63 |         } else {
64 |             Ok(())
65 |         }
66 |     }
67 |
68 |     pub(crate) fn into_string_lossy(self) -> String {
69 |         String::from_utf8_lossy(&self.0).into_owned()
70 |     }
71 | }
72 |
73 | impl From<Bytes> for TryBytes {
74 |     fn from(bs: Bytes) -> TryBytes {
75 |         TryBytes(bs)
76 |     }
77 | }
78 |
79 | impl From<&[u8]> for TryBytes {
80 |     fn from(bs: &[u8]) -> TryBytes {
81 |         TryBytes::from(Bytes::from(bs.to_vec()))
82 |     }
83 | }
84 |
85 | // All integers are read in big-endian order.
86 | pub(crate) trait TryFromBuf: Sized {
87 |     fn try_from_buf(buf: &mut Bytes) -> Result<Self, PacketError>;
88 | }
89 |
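90 | // Generates a `TryFromBuf` impl for a fixed-width type: `$t` is the type,
91 | // `$len` is the number of bytes consumed, and `$get` is the expression
92 | // (reading from the buffer `$arg`) that produces the value.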
93 | macro_rules! impl_tryfrombuf {
94 |     ($t:ty, $len:literal, $arg:ident, $get:expr) => {
95 |         impl TryFromBuf for $t {
96 |             fn try_from_buf($arg: &mut Bytes) -> Result<Self, PacketError> {
97 |                 if $arg.remaining() >= $len {
98 |                     Ok($get)
99 |                 } else {
100 |                     Err(PacketError::Short)
101 |                 }
102 |             }
103 |         }
104 |     };
105 | }
106 |
107 | impl_tryfrombuf!(u8, 1, buf, buf.get_u8());
108 | impl_tryfrombuf!(u16, 2, buf, buf.get_u16());
109 | impl_tryfrombuf!(u32, 4, buf, buf.get_u32());
110 | impl_tryfrombuf!(i32, 4, buf, buf.get_i32());
111 | impl_tryfrombuf!(u64, 8, buf, buf.get_u64());
112 | impl_tryfrombuf!(Ipv4Addr, 4, buf, buf.get_u32().into());
113 | impl_tryfrombuf!(Ipv6Addr, 16, buf, buf.get_u128().into());
114 |
115 | impl TryFromBuf for SocketAddrV4 {
116 |     fn try_from_buf(buf: &mut Bytes) -> Result<SocketAddrV4, PacketError> {
117 |         let ip = Ipv4Addr::try_from_buf(buf)?;
118 |         let port = u16::try_from_buf(buf)?;
119 |         Ok(SocketAddrV4::new(ip, port))
120 |     }
121 | }
122 |
123 | impl TryFromBuf for SocketAddrV6 {
124 |     fn try_from_buf(buf: &mut Bytes) -> Result<SocketAddrV6, PacketError> {
125 |         let ip = Ipv6Addr::try_from_buf(buf)?;
126 |         let port = u16::try_from_buf(buf)?;
127 |         Ok(SocketAddrV6::new(ip, port, 0, 0))
128 |     }
129 | }
130 |
131 | #[derive(Copy, Clone, Debug, Error, Eq, PartialEq)]
132 | pub(crate) enum PacketError {
133 |     #[error("unexpected end of packet")]
134 |     Short,
135 |     #[error("packet had trailing bytes")]
136 |     Long,
137 | }
138 |
139 | // Like `FromBencode::from_bencode()`, but it checks that there are no trailing
140 | // bytes afterwards
141 | pub(crate) fn decode_bencode<T: FromBencode>(buf: &[u8]) -> Result<T, UnbencodeError> {
142 |     let mut decoder = Decoder::new(buf).with_max_depth(T::EXPECTED_RECURSION_DEPTH);
143 |     let value = match decoder.next_object()? {
144 |         Some(obj) => T::decode_bencode_object(obj)?,
145 |         None => return Err(UnbencodeError::NoData),
146 |     };
147 |     if !matches!(decoder.next_object(), Ok(None)) {
148 |         return Err(UnbencodeError::TrailingData);
149 |     }
150 |     Ok(value)
151 | }
152 |
153 | // We can't derive `thiserror::Error` on this, as bendy's Error is not a
154 | // standard Error.
155 | #[derive(Clone, Debug)]
156 | pub(crate) enum UnbencodeError {
157 |     Bendy(bendy::decoding::Error),
158 |     NoData,
159 |     TrailingData,
160 | }
161 |
162 | impl fmt::Display for UnbencodeError {
163 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
164 |         match self {
165 |             UnbencodeError::Bendy(e) => write!(f, "{e}"),
166 |             UnbencodeError::NoData => write!(f, "no data in bencode packet"),
167 |             UnbencodeError::TrailingData => write!(f, "trailing bytes after bencode structure"),
168 |         }
169 |     }
170 | }
171 |
172 | impl From<bendy::decoding::Error> for UnbencodeError {
173 |     fn from(e: bendy::decoding::Error) -> UnbencodeError {
174 |         UnbencodeError::Bendy(e)
175 |     }
176 | }
177 |
178 | impl std::error::Error for UnbencodeError {}
179 |
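180 | // Displays an error followed by each error in its `source()` chain,
181 | // separated by ": ".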
182 | #[derive(Copy, Clone, Debug, Eq, PartialEq)]
183 | pub(crate) struct ErrorChain<E>(pub(crate) E);
184 |
185 | impl<E: std::error::Error> fmt::Display for ErrorChain<E> {
186 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
187 |         write!(f, "{}", self.0)?;
188 |         let mut source = self.0.source();
189 |         while let Some(e) = source {
190 |             write!(f, ": {e}")?;
191 |             source = e.source();
192 |         }
193 |         Ok(())
194 |     }
195 | }
196 |
197 | #[cfg(test)]
198 | mod tests {
199 |     use super::*;
200 |
201 |     #[test]
202 |     fn test_comma_list() {
203 |         assert_eq!(comma_list::<u32>(&[]).to_string(), "<none>");
204 |         assert_eq!(comma_list(&[42]).to_string(), "42");
205 |         assert_eq!(comma_list(&[42, 23]).to_string(), "42, 23");
206 |         assert_eq!(comma_list(&[42, 23, 17]).to_string(), "42, 23, 17");
207 |     }
208 |
209 |     #[test]
210 |     fn test_try_get_u8() {
211 |         let mut buf = TryBytes::from(b"abc".as_slice());
212 |         assert_eq!(buf.try_get::<u8>(), Ok(0x61));
213 |         assert_eq!(buf.try_get::<u8>(), Ok(0x62));
214 |         assert_eq!(buf.try_get::<u8>(), Ok(0x63));
215 |         assert_eq!(buf.try_get::<u8>(), Err(PacketError::Short));
216 |     }
217 |
218 |     #[test]
219 |     fn test_try_get_u16() {
220 |         let mut buf = TryBytes::from(b"abc".as_slice());
221 |         assert_eq!(buf.try_get::<u16>(), Ok(0x6162));
222 |         assert_eq!(buf.try_get::<u16>(), Err(PacketError::Short));
223 |     }
224 |
225 |     #[test]
226 |     fn test_try_get_u32() {
227 |         let mut buf = TryBytes::from(b"0123abc".as_slice());
228 |         assert_eq!(buf.try_get::<u32>(), Ok(0x30313233));
229 |         assert_eq!(buf.try_get::<u32>(), Err(PacketError::Short));
230 |     }
231 |
232 |     #[test]
233 |     fn test_try_get_i32() {
234 |         let mut buf = TryBytes::from(b"\x80123abc".as_slice());
235 |         assert_eq!(buf.try_get::<i32>(), Ok(-2144259533));
236 |         assert_eq!(buf.try_get::<i32>(), Err(PacketError::Short));
237 |     }
238 |
239 |     #[test]
240 |     fn test_try_get_u64() {
241 |         let mut buf = TryBytes::from(b"01234567abcde".as_slice());
242 |         assert_eq!(buf.try_get::<u64>(), Ok(0x3031323334353637));
243 |         assert_eq!(buf.try_get::<u64>(), Err(PacketError::Short));
244 |     }
245 |
246 |     #[test]
247 |     fn test_try_get_ipv4addr() {
248 |         let mut buf = TryBytes::from(b"0123abc".as_slice());
249 |         assert_eq!(
250 |             buf.try_get::<Ipv4Addr>(),
251 |             Ok(Ipv4Addr::new(0x30, 0x31, 0x32, 0x33))
252 |         );
253 |         assert_eq!(buf.try_get::<Ipv4Addr>(), Err(PacketError::Short));
254 |     }
255 |
256 |     #[test]
257 |     fn test_try_get_ipv6addr() {
258 |         let mut buf = TryBytes::from(b"iiiiiiiiiiiiiiii000000000".as_slice());
259 |         assert_eq!(
260 |             buf.try_get::<Ipv6Addr>(),
261 |             Ok("6969:6969:6969:6969:6969:6969:6969:6969"
262 |                 .parse::<Ipv6Addr>()
263 |                 .unwrap())
264 |         );
265 |         assert_eq!(buf.try_get::<Ipv6Addr>(), Err(PacketError::Short));
266 |     }
267 |
268 |     #[test]
269 |     fn test_try_get_socketaddrv4() {
270 |         let mut buf = TryBytes::from(b"iiiipp0123".as_slice());
271 |         assert_eq!(
272 |             buf.try_get::<SocketAddrV4>(),
273 |             Ok(SocketAddrV4::new(Ipv4Addr::new(105, 105, 105, 105), 28784))
274 |         );
275 |         assert_eq!(buf.try_get::<SocketAddrV4>(), Err(PacketError::Short));
276 |     }
277 |
278 |     #[test]
279 |     fn test_try_get_socketaddrv6() {
280 |         let mut buf = TryBytes::from(b"iiiiiiiiiiiiiiiipp012345678".as_slice());
281 |         assert_eq!(
282 |             buf.try_get::<SocketAddrV6>(),
283 |             Ok("[6969:6969:6969:6969:6969:6969:6969:6969]:28784"
284 |                 .parse::<SocketAddrV6>()
285 |                 .unwrap())
286 |         );
287 |         assert_eq!(buf.try_get::<SocketAddrV6>(), Err(PacketError::Short));
288 |     }
289 | }
290 |
--------------------------------------------------------------------------------
/tests/cli.rs:
--------------------------------------------------------------------------------
1 | #![cfg(test)]
2 | use assert_cmd::Command;
3 | use bendy::decoding::FromBencode;
4 | use bendy::encoding::ToBencode;
5 | use bendy::value::Value;
6 | use bytes::Bytes;
7 | use sha1::{Digest, Sha1};
8 | use tempfile::tempdir;
9 |
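10 | // Run `demagnetize get` on the given magnet link, then assert that the
11 | // produced .torrent file has the expected info hash, trackers, and
12 | // metadata fields.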
13 | fn test_get(magnet: &str, hash: &str, trackers: &[&str]) {
14 |     let tmp_path = tempdir().unwrap();
15 |     Command::cargo_bin("demagnetize")
16 |         .unwrap()
17 |         .arg("--log-level=TRACE")
18 |         .arg("--no-config")
19 |         .arg("get")
20 |         .arg("-o")
21 |         .arg(tmp_path.path().join("{hash}.torrent"))
22 |         .arg(magnet)
23 |         .assert()
24 |         .success();
25 |     let path = tmp_path.path().join(format!("{hash}.torrent"));
26 |     assert!(path.exists());
27 |     let buf = std::fs::read(path).unwrap();
28 |     let data = Value::from_bencode(&buf).unwrap();
29 |     let Value::Dict(mut d) = data else {
30 |         panic!("Torrent data is not a dict");
31 |     };
32 |     let info = d.remove(b"info".as_slice()).unwrap();
33 |     let Value::Dict(info) = info else {
34 |         panic!("info is not a dict");
35 |     };
36 |     let info_bytes = info.to_bencode().unwrap();
37 |     let digest = Bytes::from(Sha1::digest(info_bytes).to_vec());
38 |     assert_eq!(format!("{digest:x}"), hash);
39 |     let creation_date = d.remove(b"creation date".as_slice()).unwrap();
40 |     assert!(matches!(creation_date, Value::Integer(_)));
41 |     let Value::Bytes(created_by) = d.remove(b"created by".as_slice()).unwrap() else {
42 |         panic!("'created by' is not a string");
43 |     };
44 |     let created_by = std::str::from_utf8(&created_by).unwrap();
45 |     assert!(created_by.starts_with("demagnetize "));
46 |     let announce_list = d.remove(b"announce-list".as_slice()).unwrap();
47 |     let Value::List(lst) = announce_list else {
48 |         panic!("announce-list is not a list");
49 |     };
50 |     let mut announced = Vec::new();
51 |     for vals in lst {
52 |         let Value::List(sublist) = vals else {
53 |             panic!("Element of announce-list is not a list");
54 |         };
55 |         let Value::Bytes(bs) = sublist.into_iter().next().unwrap() else {
56 |             panic!("Element of element of announce-list is not a string");
57 |         };
58 |         let tr = String::from_utf8(bs.into_owned()).unwrap();
59 |         announced.push(tr);
60 |     }
61 |     assert_eq!(announced, trackers);
62 |     assert!(d.is_empty());
63 | }
64 |
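65 | // These tests contact real, public trackers, so they require network
66 | // access and may fail if a tracker is unreachable.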
"udp://ipv4.tracker.harry.lu:80/announce", 79 | "udp://open.stealth.si:80/announce", 80 | "udp://tracker.coppersurfer.tk:6969/announce", 81 | ] 82 | .as_slice(), 83 | ); 84 | } 85 | 86 | #[test] 87 | fn get_magnet_http_trackers_multipiece_info() { 88 | test_get( 89 | concat!( 90 | "magnet:?xt=urn:btih:b851474b74f65cd19f981c723590e3e520242b97", 91 | "&dn=debian-12.0.0-amd64-netinst.iso", 92 | "&tr=http%3A%2F%2Fbttracker.debian.org%3A6969%2Fannounce", 93 | "&ws=https%3A%2F%2Fcdimage.debian.org%2Fcdimage%2Frelease%2F12.0.0%2Famd64%2Fiso-cd%2Fdebian-12.0.0-amd64-netinst.iso", 94 | "&ws=https%3A%2F%2Fcdimage.debian.org%2Fcdimage%2Farchive%2F12.0.0%2Famd64%2Fiso-cd%2Fdebian-12.0.0-amd64-netinst.iso", 95 | ), 96 | "b851474b74f65cd19f981c723590e3e520242b97", 97 | ["http://bttracker.debian.org:6969/announce"].as_slice(), 98 | ); 99 | } 100 | --------------------------------------------------------------------------------