├── .clippy.toml ├── .codecov.yml ├── .config └── nextest.toml ├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ ├── clippy.yml │ ├── cross-releases.yml │ ├── devskim.yml │ ├── http01_helper_for_test.sh │ ├── macos-releases.yml │ ├── pebble.sh │ ├── rust-build.yml │ ├── rust-test.yml │ ├── stress.sh │ └── stress.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE.APACHE ├── LICENSE.GPL ├── PROTOCOL.md ├── README.md ├── benches └── stream_throughput.rs ├── fuzz ├── .gitignore ├── Cargo.lock ├── Cargo.toml └── fuzz_targets │ ├── frame_parser.rs │ └── socket_input.rs ├── logo.png ├── src ├── arg.rs ├── client │ ├── handle_remote │ │ ├── mod.rs │ │ ├── socks │ │ │ ├── mod.rs │ │ │ ├── v4.rs │ │ │ └── v5.rs │ │ ├── tcp.rs │ │ └── udp.rs │ ├── maybe_retryable.rs │ ├── mod.rs │ └── ws_connect.rs ├── config.rs ├── main.rs ├── mux │ ├── config.rs │ ├── dupe.rs │ ├── frame.rs │ ├── lib.rs │ ├── loom.rs │ ├── proto_version.rs │ ├── stream.rs │ ├── task.rs │ ├── tests.rs │ ├── timing.rs │ └── ws.rs ├── parse_remote.rs ├── server │ ├── acme │ │ ├── challenge_helper.rs │ │ └── mod.rs │ ├── forwarder.rs │ ├── io_with_timeout.rs │ ├── mod.rs │ ├── service.rs │ └── websocket.rs ├── tests.rs └── tls │ ├── maybe_tls.rs │ ├── mod.rs │ ├── native.rs │ └── rustls.rs └── tools ├── http01_helper ├── http01_socat_helper └── penguin-v7.lua /.clippy.toml: -------------------------------------------------------------------------------- 1 | disallowed-types = [ 2 | "std::sync::Mutex", 3 | "std::sync::RwLock", 4 | "tokio::sync::Mutex", 5 | "tokio::sync::RwLock", 6 | ] 7 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | component_management: 2 | default_rules: 3 | paths: 4 | - src/** 5 | individual_components: 6 | - component_id: rlib_penguin_mux 7 | name: multiplexer 8 | paths: 9 | - src/mux/** 10 | - component_id: bin_penguin_server 11 | 
name: server 12 | paths: 13 | - src/server/** 14 | - component_id: bin_penguin_client 15 | name: client 16 | paths: 17 | - src/client/** 18 | 19 | 20 | -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [profile.default.junit] 2 | path = "junit.xml" 3 | 4 | [profile.default] 5 | slow-timeout = { period = "30s", terminate-after = 4 } 6 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [ myzhang1029 ] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: myzhang1029 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 16 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | -------------------------------------------------------------------------------- /.github/workflows/clippy.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | # rust-clippy is a tool that runs a bunch of lints to catch common 6 | # mistakes in your Rust code and help improve your Rust code. 7 | # More details at https://github.com/rust-lang/rust-clippy 8 | # and https://rust-lang.github.io/rust-clippy/ 9 | 10 | name: rust-clippy analyze 11 | 12 | on: 13 | push: 14 | branches: [ "main" ] 15 | pull_request: 16 | # The branches below must be a subset of the branches above 17 | branches: [ "main" ] 18 | schedule: 19 | - cron: '34 16 * * 6' 20 | 21 | jobs: 22 | rust-clippy-analyze: 23 | name: Run rust-clippy analyzing 24 | runs-on: ubuntu-latest 25 | strategy: 26 | fail-fast: false 27 | matrix: 28 | tls: 29 | - nativetls 30 | - rustls-native-roots 31 | - rustls-webpki-roots 32 | permissions: 33 | contents: read 34 | security-events: write 35 | steps: 36 | - name: Checkout code 37 | uses: actions/checkout@v4 38 | 39 | - name: Install Rust toolchain 40 | run: 41 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --component clippy 42 | 43 | - name: Install required cargo 44 | run: cargo install clippy-sarif sarif-fmt 45 | 46 | - name: Run rust-clippy with ${{ matrix.tls }} 47 | run: 48 | cargo clippy 49 | --no-default-features 50 | --features ${{ matrix.tls 
}},ring,tests-real-internet4,tests-real-internet6,tokio-console,deadlock-detection,penguin-binary,acme,tungstenite 51 | --message-format=json 52 | -- 53 | -D warnings --verbose | clippy-sarif | tee rust-clippy-results.sarif | sarif-fmt 54 | continue-on-error: true 55 | 56 | 57 | - name: Upload analysis results to GitHub 58 | uses: github/codeql-action/upload-sarif@v3 59 | with: 60 | sarif_file: rust-clippy-results.sarif 61 | -------------------------------------------------------------------------------- /.github/workflows/cross-releases.yml: -------------------------------------------------------------------------------- 1 | name: Cross-compile Releases 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | tags: [ "v*" ] 7 | pull_request: 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | name: Cross-compile for ${{ matrix.config.rust_target }} 15 | runs-on: ubuntu-latest 16 | 17 | strategy: 18 | fail-fast: false 19 | # Unsupported targets are unsupported by Rust, `ring`, or `mio` 20 | matrix: 21 | config: 22 | - rust_target: aarch64-linux-android 23 | profile: release-size 24 | - rust_target: aarch64-unknown-linux-gnu 25 | profile: release 26 | - rust_target: aarch64-unknown-linux-musl 27 | profile: release 28 | - rust_target: arm-linux-androideabi 29 | profile: release-size 30 | - rust_target: arm-unknown-linux-gnueabi 31 | profile: release 32 | - rust_target: arm-unknown-linux-gnueabihf 33 | profile: release 34 | - rust_target: arm-unknown-linux-musleabi 35 | profile: release-size 36 | - rust_target: arm-unknown-linux-musleabihf 37 | profile: release-size 38 | - rust_target: armv5te-unknown-linux-gnueabi 39 | profile: release 40 | - rust_target: armv5te-unknown-linux-musleabi 41 | profile: release-size 42 | - rust_target: armv7-linux-androideabi 43 | profile: release-size 44 | - rust_target: armv7-unknown-linux-gnueabi 45 | profile: release 46 | - rust_target: armv7-unknown-linux-gnueabihf 47 | profile: release 48 | - rust_target: 
armv7-unknown-linux-musleabi 49 | profile: release-size 50 | - rust_target: armv7-unknown-linux-musleabihf 51 | profile: release-size 52 | # - rust_target: i586-unknown-linux-gnu (ring v1.17.8 SSE) 53 | # - rust_target: i586-unknown-linux-musl (ring v1.17.8 SSE) 54 | - rust_target: i686-unknown-freebsd # (crate `aws-lc-sys`) 55 | profile: release 56 | - rust_target: i686-linux-android 57 | profile: release 58 | - rust_target: i686-pc-windows-gnu # (crate `aws-lc-sys`) 59 | profile: release 60 | - rust_target: i686-unknown-linux-gnu 61 | profile: release 62 | - rust_target: i686-unknown-linux-musl 63 | profile: release 64 | - rust_target: loongarch64-unknown-linux-gnu 65 | profile: release 66 | - rust_target: loongarch64-unknown-linux-musl 67 | profile: release 68 | # - rust_target: mips-unknown-linux-gnu (cargo-cross image support) 69 | # - rust_target: mips-unknown-linux-musl (cargo-cross image support) 70 | # - rust_target: mips64-unknown-linux-gnuabi64 (cargo-cross image support) 71 | # - rust_target: mips64-unknown-linux-muslabi64 (cargo-cross image support) 72 | # - rust_target: mips64el-unknown-linux-gnuabi64 (cargo-cross image support) 73 | # - rust_target: mips64el-unknown-linux-muslabi64 (cargo-cross image support) 74 | # - rust_target: mipsel-unknown-linux-gnu (cargo-cross image support) 75 | # - rust_target: mipsel-unknown-linux-musl (cargo-cross image support) 76 | - rust_target: powerpc-unknown-linux-gnu 77 | profile: release 78 | - rust_target: powerpc64-unknown-linux-gnu 79 | profile: release 80 | - rust_target: powerpc64le-unknown-linux-gnu 81 | profile: release 82 | - rust_target: riscv64gc-unknown-linux-gnu # (crate `aws-lc-sys`) 83 | profile: release 84 | - rust_target: s390x-unknown-linux-gnu 85 | profile: release 86 | # - rust_target: sparc64-unknown-linux-gnu (ring support) 87 | # - rust_target: sparcv9-sun-solaris (ring support) 88 | # - rust_target: thumbv6m-none-eabi # (can't find crate for `std`) 89 | # - rust_target: thumbv7em-none-eabi # 
(can't find crate for `std`) 90 | # - rust_target: thumbv7em-none-eabihf # (can't find crate for `std`) 91 | # - rust_target: thumbv7m-none-eabi # (can't find crate for `std`) 92 | - rust_target: thumbv7neon-linux-androideabi 93 | profile: release-size 94 | - rust_target: thumbv7neon-unknown-linux-gnueabihf 95 | profile: release-size 96 | # - rust_target: thumbv8m.base-none-eabi # (can't find crate for `std`) 97 | # - rust_target: thumbv8m.main-none-eabi # (can't find crate for `std`) 98 | # - rust_target: thumbv8m.main-none-eabihf # (can't find crate for `std`) 99 | # - rust_target: wasm32-unknown-emscripten (mio support) 100 | - rust_target: x86_64-linux-android 101 | profile: release 102 | - rust_target: x86_64-pc-windows-gnu # (crate `aws-lc-sys`) 103 | profile: release 104 | # - rust_target: x86_64-pc-solaris # (linking issues) 105 | - rust_target: x86_64-unknown-freebsd # (crate `aws-lc-sys`) 106 | profile: release 107 | # - rust_target: x86_64-unknown-dragonfly (can't find crate for `core`) 108 | - rust_target: x86_64-unknown-illumos # (bindgen) 109 | profile: release 110 | - rust_target: x86_64-unknown-linux-gnu 111 | profile: release 112 | - rust_target: x86_64-unknown-linux-musl 113 | profile: release 114 | - rust_target: x86_64-unknown-netbsd # (crate `aws-lc-sys`) 115 | profile: release 116 | 117 | steps: 118 | - name: Checkout source 119 | uses: actions/checkout@v4 120 | 121 | - name: Install dependencies 122 | run: cargo install cross --git https://github.com/cross-rs/cross 123 | 124 | - name: Build release 125 | run: cross build --profile ${{ matrix.config.profile }} --target ${{ matrix.config.rust_target }} --features "${{ matrix.config.rust_features }}" 126 | env: 127 | CROSS_NO_WARNINGS: 0 128 | 129 | - name: Upload binary 130 | uses: actions/upload-artifact@v4 131 | with: 132 | name: penguin-${{ matrix.config.rust_target }} 133 | path: | 134 | target/${{ matrix.config.rust_target }}/${{ matrix.config.profile }}/penguin 135 | target/${{ 
matrix.config.rust_target }}/${{ matrix.config.profile }}/penguin.exe 136 | -------------------------------------------------------------------------------- /.github/workflows/devskim.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | 6 | name: DevSkim 7 | 8 | on: 9 | push: 10 | branches: [ "main" ] 11 | pull_request: 12 | branches: [ "main" ] 13 | schedule: 14 | - cron: '15 13 * * 5' 15 | 16 | jobs: 17 | lint: 18 | name: DevSkim 19 | runs-on: ubuntu-latest 20 | permissions: 21 | actions: read 22 | contents: read 23 | security-events: write 24 | steps: 25 | - name: Checkout code 26 | uses: actions/checkout@v4 27 | 28 | - name: Run DevSkim scanner 29 | uses: microsoft/DevSkim-Action@v1 30 | 31 | - name: Upload DevSkim scan results to GitHub Security tab 32 | uses: github/codeql-action/upload-sarif@v3 33 | with: 34 | sarif_file: devskim-results.sarif 35 | -------------------------------------------------------------------------------- /.github/workflows/http01_helper_for_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | token="$(echo "$2"|cut -d. 
-f1)" 4 | WEBROOT="$(dirname "$0")" 5 | 6 | echo "WEBROOT: $WEBROOT" 7 | 8 | if [ "$1" = "create" ]; then 9 | mkdir -p "$WEBROOT/.well-known/acme-challenge" 10 | echo "$2" > "$WEBROOT/.well-known/acme-challenge/$token" 11 | elif [ "$1" = "remove" ]; then 12 | rm "$WEBROOT/.well-known/acme-challenge/$token" 13 | fi 14 | -------------------------------------------------------------------------------- /.github/workflows/macos-releases.yml: -------------------------------------------------------------------------------- 1 | name: Build macOS Releases 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | tags: [ "v*" ] 7 | pull_request: 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | name: Build macOS Releases 15 | runs-on: macos-latest 16 | 17 | steps: 18 | - name: Checkout source 19 | uses: actions/checkout@v4 20 | 21 | - name: Rust Cache 22 | uses: Swatinem/rust-cache@v2 23 | 24 | - name: Install Rust with aarch64 toolchain 25 | run: 26 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --target aarch64-apple-darwin,x86_64-apple-darwin 27 | 28 | - name: Build release 29 | run: | 30 | cargo build --release --target x86_64-apple-darwin 31 | cargo build --release --target aarch64-apple-darwin 32 | 33 | - name: Upload binary 34 | uses: actions/upload-artifact@v4 35 | with: 36 | path: target/*/release/penguin 37 | -------------------------------------------------------------------------------- /.github/workflows/pebble.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kill $(lsof -t -i :5002) 4 | kill $(lsof -t -i :14000) 5 | kill $(lsof -t -i :15000) 6 | git clone https://github.com/letsencrypt/pebble 7 | cd pebble && go run ./cmd/pebble & 8 | disown 9 | -------------------------------------------------------------------------------- /.github/workflows/rust-build.yml: -------------------------------------------------------------------------------- 
1 | name: Build with Different Features 2 | 3 | on: 4 | push: 5 | pull_request: 6 | branches: [ "main" ] 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | build: 13 | runs-on: ${{ matrix.config.os }} 14 | 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | config: 19 | - name: Intel macOS 20 | os: macos-latest 21 | - name: Linux 22 | os: ubuntu-latest 23 | - name: Windows 24 | os: windows-latest 25 | tls: 26 | - nativetls 27 | - rustls-native-roots 28 | - rustls-webpki-roots 29 | 30 | steps: 31 | - uses: actions/checkout@v4 32 | 33 | - name: Install Rust toolchain 34 | run: 35 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --component llvm-tools-preview 36 | 37 | - name: Check lib only 38 | run: cargo check --verbose --no-default-features 39 | 40 | - name: Check lib with tungstenite support 41 | run: cargo check --verbose --features tungstenite --no-default-features 42 | 43 | - name: Check default features 44 | run: | 45 | cargo check --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-udp,penguin-binary,acme,tungstenite --no-default-features 46 | cargo run --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-udp,penguin-binary,acme,tungstenite --no-default-features -- --help 47 | 48 | - name: Check client only 49 | run: | 50 | cargo check --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-udp,client --no-default-features 51 | cargo run --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-udp,client --no-default-features -- --help 52 | 53 | - name: Check with most features on, using ring 54 | run: | 55 | cargo check --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-real-internet6,default-is-ipv6,tests-acme-has-pebble,penguin-binary,acme,remove-logging,deadlock-detection,nohash --no-default-features 56 | cargo run --verbose --features ${{ matrix.tls 
}},ring,tests-real-internet4,tests-real-internet6,default-is-ipv6,tests-acme-has-pebble,penguin-binary,acme,remove-logging,deadlock-detection,nohash --no-default-features -- --help 57 | 58 | - name: Check with most features on, using aws-lc-rs 59 | run: | 60 | cargo check --verbose --features ${{ matrix.tls }},aws-lc-rs,tests-real-internet4,tests-real-internet6,default-is-ipv6,tests-acme-has-pebble,penguin-binary,acme,remove-logging,deadlock-detection,nohash --no-default-features 61 | cargo run --verbose --features ${{ matrix.tls }},aws-lc-rs,tests-real-internet4,tests-real-internet6,default-is-ipv6,tests-acme-has-pebble,penguin-binary,acme,remove-logging,deadlock-detection,nohash --no-default-features -- --help 62 | 63 | - name: Check with most features on (nativetls without any rustls) 64 | if: matrix.tls == 'nativetls' 65 | run: | 66 | cargo check --verbose --features ${{ matrix.tls }},tests-real-internet4,tests-real-internet6,default-is-ipv6,penguin-binary,remove-logging,deadlock-detection,nohash --no-default-features 67 | cargo run --verbose --features ${{ matrix.tls }},tests-real-internet4,tests-real-internet6,penguin-binary,remove-logging,deadlock-detection,nohash --no-default-features -- --help 68 | 69 | - name: Check with tokio-console 70 | run: | 71 | cargo check --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-real-internet6,tests-acme-has-pebble,penguin-binary,acme,default-is-ipv6,tokio-console,deadlock-detection --no-default-features 72 | cargo run --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-real-internet6,tests-acme-has-pebble,penguin-binary,acme,default-is-ipv6,tokio-console,deadlock-detection --no-default-features -- --help 73 | env: 74 | RUSTFLAGS: "--cfg tokio_unstable" 75 | -------------------------------------------------------------------------------- /.github/workflows/rust-test.yml: -------------------------------------------------------------------------------- 1 | name: Rust Build and Test 2 
| 3 | on: 4 | push: 5 | pull_request: 6 | branches: [ "main" ] 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | build: 13 | runs-on: ${{ matrix.config.os }} 14 | 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | config: 19 | - name: Intel macOS 20 | os: macos-latest 21 | - name: Linux 22 | os: ubuntu-latest 23 | - name: Windows 24 | os: windows-latest 25 | tls: 26 | - nativetls 27 | - rustls-native-roots 28 | - rustls-webpki-roots 29 | 30 | steps: 31 | - uses: actions/checkout@v4 32 | 33 | - name: Install Rust toolchain 34 | run: 35 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --component llvm-tools-preview 36 | 37 | - name: Install grcov 38 | uses: baptiste0928/cargo-install@v3 39 | with: 40 | crate: grcov 41 | 42 | - name: Install cargo-nextest 43 | uses: baptiste0928/cargo-install@v3 44 | with: 45 | crate: cargo-nextest 46 | 47 | - name: Set up Go for Pebble 48 | uses: actions/setup-go@v5 49 | with: 50 | go-version: '>=1.20.0' 51 | cache: false 52 | 53 | - name: Install and run pebble 54 | run: .github/workflows/pebble.sh 55 | 56 | - name: Run cargo tests with more features on 57 | run: cargo nextest run --all-targets --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,tests-acme-has-pebble,penguin-binary,acme,default-is-ipv6,tokio-console,deadlock-detection --no-default-features 58 | env: 59 | RUSTFLAGS: -Cinstrument-coverage 60 | 61 | - name: Run cargo tests with default features 62 | run: cargo nextest run --all-targets --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,acme,penguin-binary --no-default-features 63 | env: 64 | RUSTFLAGS: -Cinstrument-coverage 65 | 66 | - name: Run cargo tests with the nohash hashmap 67 | run: cargo nextest run --all-targets --verbose --features ${{ matrix.tls }},ring,tests-real-internet4,acme,penguin-binary,nohash --no-default-features 68 | env: 69 | RUSTFLAGS: -Cinstrument-coverage 70 | 71 | - name: Run cargo lib tests with minimal 
features 72 | run: cargo nextest run --all-targets --verbose --no-default-features 73 | env: 74 | RUSTFLAGS: -Cinstrument-coverage 75 | 76 | - name: Run cargo lib tests with loom 77 | run: cargo test --lib --release --no-default-features 78 | env: 79 | RUSTFLAGS: --cfg loom -Cinstrument-coverage 80 | LOOM_LOG: debug 81 | 82 | - name: Process coverage data 83 | run: grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info 84 | 85 | - name: Upload test results to Codecov 86 | if: ${{ !cancelled() }} 87 | uses: codecov/test-results-action@v1 88 | with: 89 | fail_ci_if_error: false 90 | token: ${{ secrets.CODECOV_TOKEN }} 91 | flags: ${{ matrix.tls }},${{ matrix.config.name }} 92 | 93 | - name: Upload coverage to Codecov 94 | if: ${{ !cancelled() }} 95 | uses: codecov/codecov-action@v4 96 | with: 97 | fail_ci_if_error: false 98 | handle_no_reports_found: true 99 | files: ./lcov.info 100 | token: ${{ secrets.CODECOV_TOKEN }} 101 | flags: ${{ matrix.tls }},${{ matrix.config.name }} 102 | -------------------------------------------------------------------------------- /.github/workflows/stress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o pipefail 4 | 5 | # Start the tunnels 6 | (ulimit -n 32; cargo run -- -qq server) & 7 | PID_SERVER=$! 8 | (ulimit -n 32; cargo run -- client 'ws://localhost:8080/ws' '5201:127.0.0.1:1234/tcp') & 9 | PID_CLIENT=$! 10 | 11 | python << 'EOF' & 12 | import socket 13 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 14 | s.bind(("127.0.0.1", 1234)) 15 | s.listen() 16 | while True: 17 | conn, addr = s.accept() 18 | conn.sendall(b"Good.") 19 | conn.close() 20 | EOF 21 | PID_PYTHON=$! 
22 | trap 'kill $PID_SERVER; kill $PID_CLIENT; kill $PID_PYTHON; true' EXIT 23 | 24 | sleep 3 25 | 26 | exit_status=0 27 | 28 | # Run the tests 29 | 30 | # forwarding with an active target 31 | if ~/go/bin/tcpgoon run 127.0.0.1 5201 -yc 512 32 | then 33 | echo "tcpgoon passed" 34 | else 35 | echo "tcpgoon failed" 36 | exit_status=1 37 | fi 38 | 39 | kill $PID_PYTHON 40 | 41 | # forwarding with an inactive target 42 | if ~/go/bin/tcpgoon run 127.0.0.1 5201 -yc 512 43 | then 44 | echo "tcpgoon passed" 45 | else 46 | echo "tcpgoon failed" 47 | exit_status=1 48 | fi 49 | 50 | if ! tcptunnelchecker '127.0.0.1:1234' '127.0.0.1:5201' | grep 'FAIL' 51 | then 52 | echo "tcptunnelchecker passed" 53 | else 54 | echo "tcptunnelchecker failed" 55 | #exit_status=1 56 | fi 57 | 58 | exit $exit_status 59 | -------------------------------------------------------------------------------- /.github/workflows/stress.yml: -------------------------------------------------------------------------------- 1 | name: Stress Tests 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Install Rust toolchain 21 | run: 22 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal 23 | 24 | - name: Install tcpgoon and tcptunnelchecker 25 | run: | 26 | go install github.com/dachad/tcpgoon@latest 27 | cargo install --git https://github.com/vi/tcptunnelchecker 28 | 29 | - name: Build 30 | run: cargo build --quiet 31 | 32 | - name: Run stress tests 33 | run: .github/workflows/stress.sh 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | *.trace 3 | 
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rusty-penguin" 3 | version = "0.7.0" 4 | authors = ["Zhang Maiyun "] 5 | edition = "2024" 6 | description = "A fast TCP/UDP tunnel, transported over HTTP WebSocket" 7 | readme = "README.md" 8 | repository = "https://github.com/myzhang1029/penguin-rs" 9 | license = "Apache-2.0 OR GPL-3.0-or-later" 10 | keywords = ["cli", "web", "websocket"] 11 | categories = ["asynchronous", "command-line-utilities", "network-programming"] 12 | 13 | [[bin]] 14 | name = "penguin" 15 | path = "src/main.rs" 16 | required-features = ["penguin-binary-common"] 17 | 18 | [[bench]] 19 | name = "stream_throughput" 20 | harness = false 21 | required-features = ["tungstenite"] 22 | 23 | [lib] 24 | name = "penguin_mux" 25 | path = "src/mux/lib.rs" 26 | 27 | [dependencies] 28 | arc-swap = { version = "1", optional = true } 29 | aws-lc-rs = { version = "1", optional = true } 30 | base64 = { version = "0.22", optional = true } 31 | bytes = "1" 32 | clap = { version = "4", features = ["cargo", "derive"], optional = true } 33 | console-subscriber = { version = "0.4", features = ["parking_lot"], optional = true } 34 | futures-util = { version = "0.3", default-features = false } 35 | http = "1" 36 | http-body-util = { version = "0.1", optional = true } 37 | hyper = { version = "1", features = ["server", "http1", "http2"], optional = true } 38 | hyper-rustls = { version = "0.27", features = ["http1", "http2", "logging", "tls12"], default-features = false, optional = true } 39 | hyper-tls = { version = "0.6", optional = true } 40 | hyper-util = { version = "0.1", features = ["client", "client-legacy", "server", "server-auto", "tokio"], optional = true } 41 | instant-acme = { version = "0.7", features = ["hyper-rustls"], default-features = false, optional = true } 42 | log = { version = "0.4", optional = 
true } 43 | nohash-hasher = { version = "0.2", optional = true } 44 | parking_lot = "0.12" 45 | rand = "0.9" 46 | rcgen = { version = "0.13", features = ["pem"], optional = true, default-features = false } 47 | rustls = { version = "^0.23, >=0.23.18", features = ["logging", "tls12"], default-features = false, optional = true } 48 | rustls-native-certs = { version = "0.8", optional = true } 49 | rustls-pemfile = { version = "2", optional = true } 50 | sha1 = { version = "0.10", optional = true } 51 | thiserror = "2" 52 | tokio = { version = "^1, >=1.23.1", features = ["io-util", "macros", "parking_lot", "rt", "sync", "time"] } 53 | tokio-native-tls = { version = "0.3", optional = true } 54 | tokio-rustls = { version = "0.26", features = ["logging", "tls12"], default-features = false, optional = true } 55 | tokio-tungstenite = { version = "0.26", default-features = false, optional = true } 56 | tracing = "0.1" 57 | tracing-subscriber = { version = "0.3", optional = true } 58 | webpki-roots = { version = "1", optional = true } 59 | 60 | [dev-dependencies] 61 | divan = "0.1" 62 | tempfile = "3" 63 | # Hack; https://stackoverflow.com/q/73015087 64 | rusty-penguin = { path = ".", default-features = false, features = ["dev-dependencies"] } 65 | 66 | [target.'cfg(loom)'.dependencies] 67 | loom = { version = "0.7", features = ["checkpoint", "futures"] } 68 | 69 | [features] 70 | default = ["rustls-native-roots", "tests-real-internet4", "tests-udp", "penguin-binary", "acme", "tungstenite", "ring"] 71 | # Note that it does not make sense to use more than one TLS implementations 72 | # at the same time, but there must be at least one if `penguin-binary` is 73 | # enabled. 
74 | rustls-webpki-roots = ["dep:webpki-roots", "hyper-rustls/webpki-tokio", "__rustls"] 75 | rustls-native-roots = ["dep:rustls-native-certs", "hyper-rustls/native-tokio", "__rustls"] 76 | __rustls = ["dep:rustls", "dep:rustls-pemfile", "dep:tokio-rustls"] 77 | nativetls = ["dep:tokio-native-tls", "hyper-tls/alpn"] 78 | # Use ring or aws-lc-rs (supported on fewer platforms) 79 | ring = ["instant-acme/ring", "rcgen/ring", "rustls/ring", "tokio-rustls/ring", "hyper-rustls/ring"] 80 | aws-lc-rs = ["dep:aws-lc-rs", "instant-acme/aws-lc-rs", "rcgen/aws_lc_rs", "rustls/aws-lc-rs", "tokio-rustls/aws-lc-rs", "hyper-rustls/aws-lc-rs"] 81 | # Dependencies for testing 82 | dev-dependencies = ["dep:rcgen", "tracing-subscriber/env-filter", "tokio/net", "tokio/rt-multi-thread"] 83 | # Allow some tests that require real internet connection 84 | tests-real-internet4 = [] 85 | tests-real-internet6 = [] 86 | # Allow some tests that uses UDP sockets. 87 | tests-udp = [] 88 | # Test the ACME client with a local ACME server at https://localhost:14000/dir 89 | tests-acme-has-pebble = ["acme"] 90 | # Set the default localhost or unspecified address to IPv6 91 | default-is-ipv6 = [] 92 | # Export key logs to a file specified via env SSLKEYLOGFILE 93 | rustls-keylog = ["__rustls"] 94 | # Enabling this causes `penguin` to listen for `tokio-console` connections 95 | tokio-console = ["dep:console-subscriber"] 96 | # Statically remove some logging code. 
This breaks `tokio-console` 97 | remove-logging = ["tracing/max_level_trace", "tracing/release_max_level_debug", "log/max_level_trace", "log/release_max_level_debug"] 98 | # `parking_lot`'s deadlock detection in a separate thread 99 | deadlock-detection = ["parking_lot/deadlock_detection"] 100 | # obtaining certificate automatically using ACME protocol 101 | acme = ["server", "dep:instant-acme", "dep:rcgen", "tokio/process"] 102 | # use tungstenite as the WebSocket implementation 103 | tungstenite = ["dep:tokio-tungstenite"] 104 | # Use nohash-hasher for flow_id hashmaps 105 | nohash = ["dep:nohash-hasher"] 106 | # `penguin` binary -- common 107 | penguin-binary-common = [ 108 | "dep:arc-swap", 109 | "dep:clap", 110 | "dep:tracing-subscriber", 111 | "tungstenite", 112 | "tokio/fs", "tokio/net", "tokio/rt-multi-thread", "tokio/signal", 113 | "tokio-tungstenite/handshake", 114 | ] 115 | # `penguin` binary -- server 116 | server = [ 117 | "dep:base64", 118 | "dep:sha1", 119 | "dep:http-body-util", 120 | "dep:hyper", 121 | "dep:hyper-util", 122 | "penguin-binary-common", 123 | ] 124 | # `penguin` binary -- client 125 | client = ["penguin-binary-common", "tokio/io-std"] 126 | # `penguin` binary 127 | # Building both is the default and recommended in most cases. 128 | # Only building the client or server binary is supported on a best-effort basis. 129 | # The tests require both parts. 
130 | penguin-binary = ["server", "client"] 131 | 132 | [profile.release] 133 | codegen-units = 1 134 | lto = true 135 | 136 | [profile.profile] 137 | inherits = "release" 138 | debug = true 139 | strip = false 140 | panic = "unwind" 141 | 142 | [profile.release-size] 143 | inherits = "release" 144 | opt-level = "z" 145 | debug = false 146 | strip = true 147 | codegen-units = 1 148 | lto = true 149 | panic = "abort" 150 | 151 | [lints.rust] 152 | unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)', 'cfg(loom)'] } 153 | -------------------------------------------------------------------------------- /LICENSE.APACHE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /PROTOCOL.md: -------------------------------------------------------------------------------- 1 | # Penguin IP over WebSocket Protocol 2 | 3 | ## Introduction 4 | This document describes the protocol used by Rusty Penguin to tunnel 5 | the Transmission Control Protocol (TCP) and User Datagram Protocol (UDP) over 6 | HTTP WebSocket. 7 | 8 | The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", 9 | "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be 10 | interpreted as described in RFC 2119. 11 | 12 | ## Protocol Version 13 | The current protocol version is `penguin-v7`. 14 | 15 | ## Function Specification 16 | ### Service Architecture 17 | Penguin follows the client-server model. The client initiates connections to 18 | the server, and, in general, directs what happens on the connection. 19 | 20 | Since Penguin is based on Hypertext Transfer Protocol (HTTP) WebSocket, the 21 | server MUST be a conforming HTTP and WebSocket server. 
However, it is OPTIONAL 22 | for the client to be interoperable with a non-Penguin HTTP server. 23 | 24 | ### Connection Establishment 25 | The client initiates a connection with a standard HTTP WebSocket handshake. In 26 | addition to the standard HTTP WebSocket headers, the client MUST send a 27 | `Sec-WebSocket-Protocol` header with the value of the current protocol version 28 | (`penguin-v7`). The server MUST NOT complete the WebSocket upgrade if the 29 | `Sec-WebSocket-Protocol` header is missing or the value is not a version the 30 | server supports. The server MUST send a `Sec-WebSocket-Protocol` header with 31 | the accepted protocol version in the Switching Protocols response. 32 | 33 | The client MAY present a pre-shared key (PSK) to the server. The PSK is sent in 34 | the `X-Penguin-PSK` header. The server MAY use the PSK to authenticate the 35 | client, in which case the server MUST NOT complete the WebSocket upgrade if the 36 | PSK is missing or the value is not a PSK the server supports. If the server 37 | does not support or does not require PSK, it MUST ignore any `X-Penguin-PSK` 38 | header. The PSK MAY contain any value allowed as an HTTP header value. 39 | 40 | Implementations MAY support additional means of authentication, such as 41 | certificate-based authentication and HTTP basic authentication. The server MAY 42 | require the client to authenticate using any means it supports and it MAY 43 | reject the connection if the client does not authenticate using any means it 44 | supports. 45 | 46 | ### Connection Termination 47 | The client and server MAY terminate the connection at any time by sending a 48 | WebSocket close frame. 49 | 50 | ### Data Framing 51 | The client and server MAY send data to each other by sending WebSocket binary 52 | frames. The client and server MUST NOT use other WebSocket data frame types. 53 | WebSocket control frames MAY be used as specified in RFC 6455. 
54 | 55 | The payload of a WebSocket binary frame MUST be a Penguin frame. 56 | 57 | Frame Format: 58 | ``` 59 | 0 1 2 3 60 | 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 61 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 62 | | Ver | Op | Flow ID (4 bytes) | 63 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 64 | | continued | Data (variable) | 65 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 66 | ``` 67 | 68 | - Ver: 4 bits, the version of the Penguin protocol. The current version is `0x07`. 69 | 70 | - Op: 4 bits, the operation code of the frame. 71 | - `0x00`: `Connect` frame 72 | - `0x01`: `Acknowledge` frame 73 | - `0x02`: `Reset` frame 74 | - `0x03`: `Finish` frame 75 | - `0x04`: `Push` frame 76 | - `0x05`: `Bind` frame 77 | - `0x06`: `Datagram` frame 78 | 79 | - Flow ID: a 32-bit unsigned integer in network byte order uniquely identifying 80 | the logical stream or datagram. Stream and Bind operations share the same 81 | flow ID space, while Datagram operations are free to share the same flow ID space 82 | or use a different flow ID space. 83 | 84 | - Data: the payload of the frame, which varies based on the operation code. 85 | 86 | #### `Connect` Frame 87 | The `Connect` frame has the following fields: 88 | - `rwnd`: a 32-bit unsigned integer in network byte order representing the 89 | maximum number of frames the sender can buffer for this logical stream. 90 | - `target_port`: a 16-bit unsigned integer in network byte order. 91 | For a client-initiated logical TCP stream, this is the target port of the 92 | TCP forwarding. For a server-initiated logical TCP stream, this is the 93 | local port of the server where this logical stream is initiated. 94 | - `target_host`: a variable-length UTF-8 string representing the target host 95 | of the TCP forwarding or the local address. 
96 | 97 | #### `Acknowledge` Frame 98 | The `Acknowledge` frame has the following fields: 99 | - `psh_recvd_since`/`rwnd`: a 32-bit unsigned integer in network byte order 100 | representing the number of frames received since the last acknowledgment, 101 | if the `flow_id` belongs to an established logical TCP stream; or the 102 | maximum number of frames the receiver of the `Connect` frame can buffer for 103 | this logical stream. 104 | 105 | #### `Reset` Frame 106 | The `Reset` frame has no additional fields. 107 | 108 | #### `Finish` Frame 109 | The `Finish` frame has no additional fields. 110 | 111 | #### `Push` Frame 112 | The `Push` frame has the following fields: 113 | - `data`: the payload of the frame. 114 | 115 | #### `Bind` Frame 116 | The `Bind` frame has the following fields: 117 | - `bind_type`: an 8-bit unsigned integer representing 118 | the type of bind request. 119 | - `target_port`: a 16-bit unsigned integer in network byte order representing 120 | the local port the server should bind to. 121 | - `target_host`: a variable-length UTF-8 string representing the IP address or 122 | hostname the server should bind to. Hostname support is optional and 123 | implementation-defined. 124 | 125 | #### `Datagram` Frame 126 | The `Datagram` frame has the following fields: 127 | - `host_len`: a 1-byte unsigned integer representing the length of the 128 | `target_host` field in octets. 129 | - `target_port`: a 16-bit unsigned integer in network byte order representing 130 | the target port of the datagram, or the local port of the server to which the 131 | datagram was sent. 132 | - `target_host`: a variable-length UTF-8 string representing the target host 133 | of the datagram, or the local address of the server to which the datagram was 134 | sent. 135 | - `data`: the payload of the datagram. 136 | 137 | ### Data Transfer 138 | The same WebSocket connection is used to tunnel TCP connections and transfer 139 | UDP datagrams. 
140 | 141 | ### Tunneling Operations 142 | #### Logical TCP Streams 143 | A logical TCP stream connection may be established by both the client and the 144 | server. 145 | 146 | To establish a forward connection, one end MUST send a stream frame with the 147 | `Connect` operation code and a unique `flow_id`. 148 | 149 | Upon receiving the `Connect` frame, the other end MAY reject its peer's choice 150 | of `flow_id` by sending a stream frame with the `Reset` operation code. If the 151 | other end accepts the `Connect` frame, it MUST reply with a stream frame with 152 | the `Acknowledge` operation code. The data of the `Acknowledge` frame MUST be 153 | its `rwnd` value as a 32-bit unsigned integer. 154 | 155 | Both ends SHOULD save the `rwnd` value associated with that stream for later 156 | use, in a counter (`psh_recvd_since`). 157 | 158 | After the logical stream is established, both ends MAY send data in a frame 159 | with the `Push` operation code, decrementing its `psh_recvd_since` counter by 160 | one for each frame sent. One end MUST NOT send more than the corresponding 161 | `rwnd` frames before receiving an `Acknowledge` frame from the other end. 162 | 163 | Either end MAY send a frame with the `Acknowledge` operation code, with which 164 | the sender acknowledges the receipt of a certain number of frames as a 32-bit 165 | unsigned integer in network byte order in the data of the frame. Upon 166 | receiving an `Acknowledge` frame, the receiver MUST increase its corresponding 167 | `psh_recvd_since` counter by the value in the data of the frame. 168 | One end MUST send an `Acknowledge` frame when it processes `rwnd` frames from 169 | the other end after sending the last `Acknowledge` frame. 170 | However, implementations MAY send `Ack` frames more frequently to, for example, 171 | reduce blocking delay in anticipation of frequent writing. 
172 | 173 | An implementation MAY choose to send `Acknowledge` with a larger `rwnd` value 174 | than what is advertised initially in the `Connect` frame. This allows the 175 | sender to increase the `rwnd` dynamically based on the network conditions and 176 | the receiver's ability to process frames. 177 | 178 | Either end MAY send a frame with the `Finish` operation code, with which the 179 | sender indicates that it will not send any more data. When both ends send a 180 | `Finish` frame, the logical stream is closed. 181 | 182 | Either end MAY send a frame with the `Reset` operation code, with which the 183 | sender indicates that it either received a frame with an invalid `flow_id` or 184 | an abrupt closure of that logical stream. When either end sends a `Reset` 185 | frame, the logical stream is closed. 186 | 187 | Since the underlying WebSocket connection is reliable, there is no need to 188 | acknowledge the receipt of a frame. Therefore, the use of the `Acknowledge` 189 | frame is only for flow control of `Push` frames. 190 | 191 | #### Client Forwarding Requests 192 | The client MAY request the server to forward a TCP connection to a specific 193 | host and port by initiating a logical TCP stream with its intended target host 194 | and port in the initial `Connect` frame. 195 | 196 | #### Client Bind Requests 197 | The client MAY request the server to listen on a specific port and forward 198 | incoming connections to the client. This is done by sending a stream frame with 199 | the `Bind` operation code. The data of the frame MUST be a 16-bit unsigned 200 | integer in network byte order representing the port number on which the server 201 | should listen for incoming connections and a variable-length UTF-8 string 202 | representing the IP address or hostname the server should bind to. 
203 | 204 | Upon receiving the `Bind` frame, the server MAY reply with a stream frame with 205 | the `Reset` operation code if it cannot honour the bind request (for example, 206 | if the requested port is already in use, the requested address is invalid, or 207 | the server is not configured to allow bind requests). However, if the bind 208 | request is successfully honoured, the server MUST reply with a stream frame 209 | with the `Finish` operation code. 210 | 211 | The `bind_type` field of the `Bind` frame takes two values: 212 | - `1`: TCP socket binding request 213 | - `3`: UDP datagram binding request 214 | 215 | For a TCP socket binding request, the `flow_id` is immediately freed once the 216 | Bind request is honoured or rejected. Subsequent communications on the bound 217 | port are established using the procedures for a normal logical TCP stream. 218 | 219 | For a UDP datagram binding request, the server MUST keep the `flow_id` of the 220 | `Bind` request as the `flow_id` for future datagram frames sent to the client. 221 | Since UDP is connection-less, this ensures that the client is in control of 222 | the `flow_id` space of datagram frames. 223 | 224 | #### UDP Datagram Tunneling 225 | The client and server MAY send UDP datagrams to each other using the `Datagram` 226 | operation code. 227 | 228 | For a client-originated datagram, the client SHOULD allocate a unique `flow_id` 229 | for each (source host, source port, target host, target port) tuple it sees, 230 | and the server SHOULD forward the datagram to the target host and port 231 | specified in the `target_host` and `target_port` fields of the datagram frame. 232 | After forwarding the datagram, the server SHOULD wait for a response datagram 233 | from the target host and port for at least five (5) seconds. 
If the server 234 | receives a response datagram, it SHOULD send a datagram frame with the 235 | `flow_id` field set to the `flow_id` of the original datagram frame, and the 236 | `data` field set to the payload of the response datagram. The values of the 237 | `target_host` and `target_port` fields of the responding datagram frame are 238 | implementation-defined. 239 | 240 | The server MUST NOT originate a datagram frame unless the client has sent a 241 | `Bind` request with the `bind_type` set to `3`. The server SHOULD forward all 242 | UDP packets received on the bound port to the client using the `Datagram` 243 | operation code. The `flow_id` of the datagram frame sent to the client MUST 244 | be the same as the `flow_id` of the original `Bind` request. The `target_host` 245 | and `target_port` fields of the datagram frame sent to the client MUST be 246 | the same as the `target_host` and `target_port` fields of the original `Bind` 247 | request. The client MAY wait for a response datagram from the server and send 248 | such a response datagram back to the server using the same `flow_id`. The 249 | `target_host` and `target_port` fields of such a response datagram frame are 250 | implementation-defined. 251 | 252 | ## Security Considerations 253 | The protocol is designed to be indistinguishable from normal HTTP traffic 254 | with WebSocket. The server MAY decide to make reasonable efforts to prevent the 255 | detection of the presence of the protocol, for example, by acting as a normal 256 | HTTP server and only upgrading the connection to WebSocket when the client 257 | sends a valid WebSocket handshake request with the correct PSK. 258 | 259 | The integrity of the data and confidentiality of the data are to be provided by 260 | the underlying WebSocket connection. 
261 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rusty Penguin 2 | ![Logo](https://raw.githubusercontent.com/myzhang1029/penguin-rs/main/logo.png) 3 | 4 | [![Cargo Tests](https://github.com/myzhang1029/penguin-rs/actions/workflows/rust-test.yml/badge.svg)](https://github.com/myzhang1029/penguin-rs/actions/workflows/rust-test.yml) 5 | [![Crates.io](https://img.shields.io/crates/v/rusty-penguin.svg)](https://crates.io/crates/rusty-penguin) 6 | [![Dependency Status](https://deps.rs/repo/github/myzhang1029/penguin-rs/status.svg)](https://deps.rs/repo/github/myzhang1029/penguin-rs) 7 | [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10422/badge)](https://www.bestpractices.dev/projects/10422) 8 | [![Codecov](https://codecov.io/gh/myzhang1029/penguin-rs/branch/main/graph/badge.svg?token=L0TE5i23sn)](https://codecov.io/gh/myzhang1029/penguin-rs) 9 | [![wakatime](https://wakatime.com/badge/github/myzhang1029/penguin-rs.svg)](https://wakatime.com/badge/github/myzhang1029/penguin-rs) 10 | ![License](https://img.shields.io/crates/l/rusty-penguin.svg) 11 | 12 | ## About 13 | A fast TCP/UDP tunnel, transported over HTTP WebSocket. 14 | You are right. This project is inspired by `jpillora/chisel` (and subsequently 15 | my fork `myzhang1029/penguin`), but completely rewritten in Rust without any 16 | linkage to `chisel`. The logo is generated by [DALL-E](https://labs.openai.com) 17 | with the prompt ["a penguin standing behind a gear wheel, digital art, logo."]( 18 | https://labs.openai.com/s/Et1VIeCBREIRHhF7MU9NoZL6 19 | ) 20 | 21 | ## Basic Usage 22 | ### Server 23 | ```bash 24 | $ penguin server --host ::1 --port 443 --tls-cert cert.pem --tls-key key.pem --ws-psk some-secret 25 | ``` 26 | See `penguin server --help` for more options. 
27 | 28 | ### Client 29 | ```bash 30 | $ penguin client --ws-psk some-secret wss://server 1080:socks 80:example.com:80 31 | ``` 32 | See `penguin client --help` for more options. 33 | 34 | ## Comparison 35 | Compared to the original `penguin` or `chisel`, this project stripped away 36 | some functionalities: 37 | 38 | - There is no internal SSH tunnels because it results in double encapsulation 39 | when used with HTTPS/WSS. 40 | 41 | - There is no user/password authentication because we do not have SSH. Instead, 42 | use PSK authentication. 43 | 44 | - There is no server keep-alive because client keep-alive is enough. 45 | 46 | - ~~There is no support to acquire an ACME certificate on-the-fly.~~ (Implemented) 47 | 48 | - ~~There is no reverse port forwarding because I am too lazy.~~ (Implemented) 49 | 50 | Other than that, this project offers these functionalities compared to 51 | `chisel`: 52 | 53 | - Plausible deniability with WebSocket PSK and working `backend`. 54 | 55 | - TLS certificate hot-reload with `SIGUSR1`. 56 | 57 | - Higher performance: my crude testing on my machine reveals that `penguin` is 58 | approximately 2x faster than `chisel` on my machine (`penguin` 59 | commit `73a0045ff` vs `chisel` commit `ab8f06a8`). 
60 | ``` 61 | $ iperf3 -c 127.0.0.1 # chisel without TLS 62 | [ ID] Interval Transfer Bitrate 63 | [ 5] 0.00-10.00 sec 5.41 GBytes 4.65 Gbits/sec sender 64 | [ 5] 0.00-10.00 sec 5.40 GBytes 4.64 Gbits/sec receiver 65 | 66 | $ iperf3 -c 127.0.0.1 # penguin without TLS 67 | [ ID] Interval Transfer Bitrate 68 | [ 5] 0.00-10.00 sec 16.5 GBytes 14.2 Gbits/sec sender 69 | [ 5] 0.00-10.00 sec 16.5 GBytes 14.2 Gbits/sec receiver 70 | 71 | $ iperf3 -c 127.0.0.1 # chisel with TLS 72 | [ ID] Interval Transfer Bitrate 73 | [ 5] 0.00-10.00 sec 4.79 GBytes 4.12 Gbits/sec sender 74 | [ 5] 0.00-10.01 sec 4.79 GBytes 4.11 Gbits/sec receiver 75 | 76 | $ iperf3 -c 127.0.0.1 # penguin with TLS 77 | [ ID] Interval Transfer Bitrate 78 | [ 5] 0.00-10.01 sec 11.0 GBytes 9.48 Gbits/sec sender 79 | [ 5] 0.00-10.01 sec 11.0 GBytes 9.48 Gbits/sec receiver 80 | ``` 81 | 82 | - All the safety Rust offers. 83 | 84 | ## Protocol 85 | Servers and clients with the same protocol version are compatible with each other. However, for the best performance, it is recommended to use the same version of `penguin` on both sides. 86 | 87 | The current protocol version is `penguin-v7`. See [PROTOCOL.md](PROTOCOL.md) for details. 88 | 89 | ## Cargo Features 90 | Library features: 91 | - `tungstenite`: implement our traits on `tokio_tungstenite::WebSocketStream` (default) 92 | - `nohash`: (caution) use `nohash_hasher` as the internal `flow_id` hashmap. 93 | This option may be an optimization for resource-constrained devices, but will also open up a DoS attack vector if the peer cannot be trusted. 94 | If both peers use this penguin implementation or any other implementation 95 | that generates flow_ids with a random number generator, this is safe. 
96 | 97 | Executable features: 98 | - `client`: build the client (default) 99 | - `server`: build the server (default) 100 | - `penguin-binary`: shorthand for both `server` and `client` (default) 101 | - `rustls-native-roots`: use `rustls` with system CA (default) 102 | - `rustls-webpki-roots`: use `rustls` with bundled webpki CA 103 | - `nativetls`: use `native-tls` 104 | - `ring`: use `ring` as the crypto provider for `rustls` 105 | - `aws-lc-rs`: use `aws-lc-rs` as the crypto provider for `rustls` 106 | 107 | - `default-is-ipv6`: use `::`/`::1` instead of `0.0.0.0`/`127.0.0.1` when an IP address is omitted in the client command line 108 | 109 | - `tokio-console`: enable `console-subscriber` support 110 | - `remove-logging`: statically remove `trace` level logging and tracing code 111 | - `deadlock-detection`: spawn a background thread running `parking_lot`'s deadlock detection 112 | - `acme`: (requires `server`) enable the built-in ACME client (default) 113 | Will also make the binary use `rustls` even if `nativetls` is enabled due to internal dependencies. 114 | - `rustls_keylog`: (caution) export TLS session data to the file specified in the environment variable `SSLKEYLOGFILE` 115 | 116 | Testing features: 117 | - `tests-real-internet4`: run tests that require IPv4 access to the internet 118 | - `tests-real-internet6`: run tests that require IPv6 access to the internet 119 | - `tests-udp`: run tests that expect UDP traffic to work reliably. They may be flaky depending on the network environment. 120 | - `tests-acme-has-pebble`: test the ACME client with a local ACME server at `https://localhost:14000/dir` 121 | 122 | ## Contribution 123 | All contributions are welcome. Please make sure you 124 | 1. Write test cases for the bugfix/feature 125 | 2. Check that the patch passes all tests by running 126 | ``` 127 | cargo test 128 | ``` 129 | 3. Send a Pull Request. 130 | 131 | ## License 132 | GPL v3.0 or later or Apache License 2.0. 
133 | -------------------------------------------------------------------------------- /benches/stream_throughput.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use divan::{Bencher, counter::BytesCount}; 3 | use penguin_mux::{Dupe, Multiplexor}; 4 | use std::sync::LazyLock; 5 | use tokio::{ 6 | io::{AsyncReadExt, AsyncWriteExt}, 7 | net::{TcpListener, TcpStream}, 8 | runtime, 9 | }; 10 | use tokio_tungstenite::{WebSocketStream, tungstenite::protocol::Role}; 11 | use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt}; 12 | 13 | fn setup_logging() { 14 | tracing_subscriber::registry() 15 | .with(tracing_subscriber::fmt::layer()) 16 | .with(EnvFilter::from_default_env()) 17 | .try_init() 18 | .ok(); 19 | } 20 | 21 | static TOKIO_RT: LazyLock = LazyLock::new(|| { 22 | runtime::Builder::new_multi_thread() 23 | .enable_all() 24 | .build() 25 | .unwrap() 26 | }); 27 | 28 | const EACH_WRITE_SIZE: usize = 4096; 29 | 30 | fn make_payload() -> Bytes { 31 | (0..EACH_WRITE_SIZE).map(|_| rand::random::()).collect() 32 | } 33 | 34 | #[divan::bench] 35 | fn baseline_ws(b: Bencher<'_, '_>) { 36 | b.with_inputs(make_payload).bench_values(|_| { 37 | TOKIO_RT.block_on(async { 38 | let s_socket = TcpListener::bind(("::1", 0)).await.unwrap(); 39 | let s_addr = s_socket.local_addr().unwrap(); 40 | tokio::spawn(async move { 41 | let tcpstream = s_socket.accept().await.unwrap().0; 42 | let server = WebSocketStream::from_raw_socket(tcpstream, Role::Server, None).await; 43 | let mux = Multiplexor::new(server, None, None); 44 | let mut stream = mux.accept_stream_channel().await.unwrap(); 45 | stream.shutdown().await.unwrap(); 46 | }); 47 | let tcpstream = TcpStream::connect(s_addr).await.unwrap(); 48 | let client = WebSocketStream::from_raw_socket(tcpstream, Role::Client, None).await; 49 | let mux = Multiplexor::new(client, None, None); 50 | let mut stream = mux.new_stream_channel(&[], 
0).await.unwrap(); 51 | stream.shutdown().await.unwrap(); 52 | }); 53 | }); 54 | } 55 | 56 | #[divan::bench(args = [2, 8, 64, 512, 4096, 32768])] 57 | fn baseline_tcp(b: Bencher<'_, '_>, num_writes: usize) { 58 | b.with_inputs(make_payload) 59 | .counter(BytesCount::new(num_writes * EACH_WRITE_SIZE * 2)) 60 | .bench_values(|payload| { 61 | TOKIO_RT.block_on(async { 62 | let s_socket = TcpListener::bind(("::1", 0)).await.unwrap(); 63 | let s_addr = s_socket.local_addr().unwrap(); 64 | let s_payload = payload.dupe(); 65 | let len = payload.len(); 66 | tokio::spawn(async move { 67 | let mut stream = s_socket.accept().await.unwrap().0; 68 | for _ in 0..num_writes { 69 | stream.write_all(&s_payload).await.unwrap(); 70 | } 71 | stream.shutdown().await.unwrap(); 72 | }); 73 | let mut stream = TcpStream::connect(s_addr).await.unwrap(); 74 | stream.shutdown().await.unwrap(); 75 | let mut buf = vec![0; len]; 76 | for _ in 0..num_writes { 77 | stream.read_exact(&mut buf).await.unwrap(); 78 | // No check for correctness as this should be guaranteed by tests 79 | } 80 | assert_eq!(buf, payload); 81 | }); 82 | }); 83 | } 84 | 85 | #[divan::bench(args = [1, 4, 32, 256, 2048, 16384])] 86 | fn baseline_tcp_bidir(b: Bencher<'_, '_>, num_writes: usize) { 87 | b.with_inputs(make_payload) 88 | .counter(BytesCount::new(num_writes * EACH_WRITE_SIZE * 2)) 89 | .bench_values(|payload| { 90 | TOKIO_RT.block_on(async { 91 | let s_socket = TcpListener::bind(("::1", 0)).await.unwrap(); 92 | let s_addr = s_socket.local_addr().unwrap(); 93 | let s_payload = payload.dupe(); 94 | let len = payload.len(); 95 | tokio::spawn(async move { 96 | let mut stream = s_socket.accept().await.unwrap().0; 97 | let mut buf = vec![0; len]; 98 | for _ in 0..num_writes { 99 | stream.write_all(&s_payload).await.unwrap(); 100 | stream.read_exact(&mut buf).await.unwrap(); 101 | // No check for correctness as this should be guaranteed by tests 102 | } 103 | stream.shutdown().await.unwrap(); 104 | }); 105 | let mut 
stream = TcpStream::connect(s_addr).await.unwrap(); 106 | let mut buf = vec![0; len]; 107 | for _ in 0..num_writes { 108 | stream.read_exact(&mut buf).await.unwrap(); 109 | stream.write_all(&buf).await.unwrap(); 110 | } 111 | stream.shutdown().await.unwrap(); 112 | assert_eq!(buf, payload); 113 | }); 114 | }); 115 | } 116 | 117 | #[divan::bench(args = [2, 8, 64, 512, 4096, 32768])] 118 | fn bench_stream_throughput(b: Bencher<'_, '_>, num_writes: usize) { 119 | b.with_inputs(make_payload) 120 | .counter(BytesCount::new(num_writes * EACH_WRITE_SIZE * 2)) 121 | .bench_values(|payload| { 122 | TOKIO_RT.block_on(async { 123 | let s_socket = TcpListener::bind(("::1", 0)).await.unwrap(); 124 | let s_addr = s_socket.local_addr().unwrap(); 125 | let s_payload = payload.dupe(); 126 | let len = payload.len(); 127 | tokio::spawn(async move { 128 | let tcpstream = s_socket.accept().await.unwrap().0; 129 | let server = 130 | WebSocketStream::from_raw_socket(tcpstream, Role::Server, None).await; 131 | let mux = Multiplexor::new(server, None, None); 132 | let mut stream = mux.accept_stream_channel().await.unwrap(); 133 | for _ in 0..num_writes { 134 | stream.write_all(&s_payload).await.unwrap(); 135 | } 136 | stream.shutdown().await.unwrap(); 137 | }); 138 | let tcpstream = TcpStream::connect(s_addr).await.unwrap(); 139 | let client = WebSocketStream::from_raw_socket(tcpstream, Role::Client, None).await; 140 | let mux = Multiplexor::new(client, None, None); 141 | let mut stream = mux.new_stream_channel(&[], 0).await.unwrap(); 142 | stream.shutdown().await.unwrap(); 143 | //tokio::time::sleep(std::time::Duration::from_secs(1)).await; 144 | let mut buf = vec![0; len]; 145 | for _ in 0..num_writes { 146 | stream.read_exact(&mut buf).await.unwrap(); 147 | // No check for correctness as this should be guaranteed by tests 148 | } 149 | }); 150 | }); 151 | } 152 | 153 | #[divan::bench(args = [1, 4, 32, 256, 2048, 16384])] 154 | fn bench_stream_throughput_bidir(b: Bencher<'_, '_>, 
num_writes: usize) { 155 | b.with_inputs(make_payload) 156 | .counter(BytesCount::new(num_writes * EACH_WRITE_SIZE * 2)) 157 | .bench_values(|payload| { 158 | TOKIO_RT.block_on(async { 159 | let s_socket = TcpListener::bind(("::1", 0)).await.unwrap(); 160 | let s_addr = s_socket.local_addr().unwrap(); 161 | let s_payload = payload.dupe(); 162 | let len = payload.len(); 163 | tokio::spawn(async move { 164 | let tcpstream = s_socket.accept().await.unwrap().0; 165 | let server = 166 | WebSocketStream::from_raw_socket(tcpstream, Role::Server, None).await; 167 | let mux = Multiplexor::new(server, None, None); 168 | let mut stream = mux.accept_stream_channel().await.unwrap(); 169 | let mut buf = vec![0; len]; 170 | for _ in 0..num_writes { 171 | stream.write_all(&s_payload).await.unwrap(); 172 | stream.read_exact(&mut buf).await.unwrap(); 173 | // No check for correctness as this should be guaranteed by tests 174 | } 175 | stream.shutdown().await.unwrap(); 176 | }); 177 | let tcpstream = TcpStream::connect(s_addr).await.unwrap(); 178 | let client = WebSocketStream::from_raw_socket(tcpstream, Role::Client, None).await; 179 | let mux = Multiplexor::new(client, None, None); 180 | let mut stream = mux.new_stream_channel(&[], 0).await.unwrap(); 181 | //tokio::time::sleep(std::time::Duration::from_secs(1)).await; 182 | let mut buf = vec![0; len]; 183 | for _ in 0..num_writes { 184 | stream.read_exact(&mut buf).await.unwrap(); 185 | stream.write_all(&buf).await.unwrap(); 186 | } 187 | stream.shutdown().await.unwrap(); 188 | }); 189 | }); 190 | } 191 | 192 | #[divan::bench(args = [1, 4, 16, 64, 256])] 193 | fn bench_stream_throughput_with_contention(b: Bencher<'_, '_>, num_concurrent: usize) { 194 | const EACH_JOB_WRITES: usize = 256; 195 | b.with_inputs(make_payload) 196 | .counter(BytesCount::new( 197 | num_concurrent * EACH_WRITE_SIZE * EACH_JOB_WRITES * 2, 198 | )) 199 | .bench_values(|payload| { 200 | TOKIO_RT.block_on(async { 201 | let mut jobs = 
tokio::task::JoinSet::new(); 202 | let s_socket = TcpListener::bind(("::1", 0)).await.unwrap(); 203 | let s_addr = s_socket.local_addr().unwrap(); 204 | let s_payload = payload.dupe(); 205 | let len = payload.len(); 206 | jobs.spawn(async move { 207 | let mut server_jobs = tokio::task::JoinSet::new(); 208 | let tcpstream = s_socket.accept().await.unwrap().0; 209 | let server = 210 | WebSocketStream::from_raw_socket(tcpstream, Role::Server, None).await; 211 | let mux = Multiplexor::new(server, None, None); 212 | for _ in 0..num_concurrent { 213 | let mut stream = mux.accept_stream_channel().await.unwrap(); 214 | let s_payload = s_payload.dupe(); 215 | server_jobs.spawn(async move { 216 | let mut buf = vec![0; len]; 217 | for _ in 0..EACH_JOB_WRITES { 218 | stream.write_all(&s_payload).await.unwrap(); 219 | stream.read_exact(&mut buf).await.unwrap(); 220 | // No check for correctness as this should be guaranteed by tests 221 | } 222 | stream.shutdown().await.unwrap(); 223 | }); 224 | } 225 | while let Some(res) = server_jobs.join_next().await { 226 | res.unwrap(); 227 | } 228 | }); 229 | let tcpstream = TcpStream::connect(s_addr).await.unwrap(); 230 | let client = WebSocketStream::from_raw_socket(tcpstream, Role::Client, None).await; 231 | let mux = Multiplexor::new(client, None, None); 232 | for _ in 0..num_concurrent { 233 | let mut stream = mux.new_stream_channel(&[], 0).await.unwrap(); 234 | jobs.spawn(async move { 235 | let mut buf = vec![0; len]; 236 | for _ in 0..EACH_JOB_WRITES { 237 | stream.read_exact(&mut buf).await.unwrap(); 238 | stream.write_all(&buf).await.unwrap(); 239 | } 240 | stream.shutdown().await.unwrap(); 241 | }); 242 | } 243 | while let Some(res) = jobs.join_next().await { 244 | res.unwrap(); 245 | } 246 | }); 247 | }); 248 | } 249 | 250 | fn main() { 251 | setup_logging(); 252 | divan::main(); 253 | } 254 | -------------------------------------------------------------------------------- /fuzz/.gitignore: 
-------------------------------------------------------------------------------- 1 | target 2 | corpus 3 | artifacts 4 | coverage 5 | -------------------------------------------------------------------------------- /fuzz/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rusty-penguin-fuzz" 3 | version = "0.0.0" 4 | publish = false 5 | edition = "2024" 6 | 7 | [package.metadata] 8 | cargo-fuzz = true 9 | 10 | [dependencies] 11 | bytes = "1" 12 | libfuzzer-sys = "0.4" 13 | tokio = { version = "^1, >=1.23.1", features = ["io-util"] } 14 | tokio-tungstenite = { version = "0.26", default-features = false } 15 | 16 | [dependencies.rusty-penguin] 17 | path = ".." 18 | 19 | [[bin]] 20 | name = "frame_parser" 21 | path = "fuzz_targets/frame_parser.rs" 22 | test = false 23 | doc = false 24 | bench = false 25 | 26 | [[bin]] 27 | name = "socket_input" 28 | path = "fuzz_targets/socket_input.rs" 29 | test = false 30 | doc = false 31 | bench = false 32 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/frame_parser.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | 3 | use libfuzzer_sys::fuzz_target; 4 | 5 | fuzz_target!(|data: &[u8]| { 6 | let bytes = bytes::Bytes::from(data.to_vec()); 7 | let _ = penguin_mux::frame::Frame::try_from(bytes); 8 | }); 9 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/socket_input.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | 3 | use bytes::{Buf, Bytes}; 4 | use libfuzzer_sys::fuzz_target; 5 | use penguin_mux::{Datagram, Multiplexor}; 6 | use tokio::{io::AsyncWriteExt, runtime}; 7 | use tokio_tungstenite::{WebSocketStream, tungstenite::protocol::Role}; 8 | 9 | fuzz_target!(|data: &[u8]| { 10 | let rt = runtime::Builder::new_current_thread() 11 | .enable_all() 12 | 
.build() 13 | .unwrap(); 14 | rt.block_on(async { 15 | let data = data.to_vec(); 16 | let (alice, mut eve) = tokio::io::duplex(1024); 17 | let ws = WebSocketStream::from_raw_socket(alice, Role::Server, None).await; 18 | let mux = Multiplexor::new(ws, None, None); 19 | eve.write_all(&data).await.unwrap(); 20 | let flow_id = as AsRef<[u8]>>::as_ref(&data) 21 | .try_get_u32() 22 | .unwrap_or(0); 23 | let target_host = data.get(4..251).unwrap_or(&[]).to_vec().into(); 24 | let target_port = as AsRef<[u8]>>::as_ref(&data) 25 | .try_get_u16() 26 | .unwrap_or(0); 27 | let data = Bytes::from(data); 28 | mux.send_datagram(Datagram { 29 | flow_id, 30 | target_host, 31 | target_port, 32 | data, 33 | }) 34 | .await 35 | .unwrap(); 36 | }); 37 | }); 38 | -------------------------------------------------------------------------------- /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/myzhang1029/penguin-rs/16f1babf48eca2385cfa2dd410371f96b90fb76d/logo.png -------------------------------------------------------------------------------- /src/client/handle_remote/mod.rs: -------------------------------------------------------------------------------- 1 | //! Run a remote connection. 2 | //! 3 | //! These are persistent tasks that run for the lifetime of the client. 4 | //! They should try to handle connections as long as the client is alive, 5 | //! and if they fail, the entire client will fail. 6 | //! Whenever a new connection is made, it tries to create a new channel 7 | //! from the main loop and then spawns a new task to handle the connection. 
8 | // 9 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 10 | 11 | pub(super) mod socks; 12 | mod tcp; 13 | mod udp; 14 | 15 | use self::socks::{handle_socks, handle_socks_stdio}; 16 | use self::tcp::{handle_tcp, handle_tcp_stdio}; 17 | use self::udp::{handle_udp, handle_udp_stdio}; 18 | use crate::client::HandlerResources; 19 | use crate::parse_remote::{LocalSpec, RemoteSpec}; 20 | use crate::parse_remote::{Protocol, Remote}; 21 | use thiserror::Error; 22 | use tokio::io::{AsyncRead, AsyncWrite}; 23 | use tracing::{debug, error}; 24 | 25 | /// Handler errors 26 | /// These are all fatal errors that will cause the client to exit. 27 | #[derive(Debug, Error)] 28 | pub enum FatalError { 29 | /// Happens when IO errors occur on client sockets, which are unlikely 30 | /// to be recoverable. 31 | // Not marked as #[from] so that we don't casually cast all IO errors 32 | #[error(transparent)] 33 | ClientIo(std::io::Error), 34 | /// Happens when the main loop exits and is thus unable to receive 35 | /// datagrams on the channel. 36 | #[error("Cannot request stream from the main loop")] 37 | RequestStream, 38 | /// Happens when the main loop exits and is thus unable to receive 39 | /// datagrams on the channel. 40 | #[error("Cannot send datagram to the main loop")] 41 | SendDatagram, 42 | /// Happens when the main loop receives an unretryable error 43 | /// while waiting for a straem to be established. 44 | #[error("Main loop exited without sending stream")] 45 | MainLoopExitWithoutSendingStream, 46 | } 47 | 48 | /// Construct a TCP remote based on the description. These are simple because 49 | /// a new channel can be created for each new connection and they do not need 50 | /// to persist after the connection. 51 | /// This should be spawned as tasks and they will remain as long as `client` 52 | /// is alive. Individual connection tasks are spawned as connections appear. 
53 | #[tracing::instrument(skip_all, fields(remote = %remote), level = "debug")] 54 | pub(super) async fn handle_remote( 55 | remote: &'static Remote, 56 | handler_resources: &'static HandlerResources, 57 | ) -> Result<(), FatalError> { 58 | debug!("opening remote"); 59 | match (&remote.local_addr, &remote.remote_addr, remote.protocol) { 60 | (LocalSpec::Inet((lhost, lport)), RemoteSpec::Inet((rhost, rport)), Protocol::Tcp) => { 61 | handle_tcp(lhost, *lport, rhost, *rport, handler_resources).await 62 | } 63 | (LocalSpec::Inet((lhost, lport)), RemoteSpec::Inet((rhost, rport)), Protocol::Udp) => { 64 | handle_udp(lhost, *lport, rhost, *rport, handler_resources).await 65 | } 66 | (LocalSpec::Stdio, RemoteSpec::Inet((rhost, rport)), Protocol::Tcp) => { 67 | handle_tcp_stdio(rhost, *rport, handler_resources).await 68 | } 69 | (LocalSpec::Stdio, RemoteSpec::Inet((rhost, rport)), Protocol::Udp) => { 70 | handle_udp_stdio(rhost, *rport, handler_resources).await 71 | } 72 | (LocalSpec::Inet((lhost, lport)), RemoteSpec::Socks, _) => { 73 | // The parser guarantees that the protocol is TCP 74 | handle_socks(lhost, *lport, handler_resources).await 75 | } 76 | (LocalSpec::Stdio, RemoteSpec::Socks, _) => { 77 | // The parser guarantees that the protocol is TCP 78 | handle_socks_stdio(handler_resources).await 79 | } 80 | } 81 | } 82 | 83 | /// Merged `stdin` and `stdout` into a single stream 84 | #[derive(Debug)] 85 | pub struct Stdio { 86 | stdin: tokio::io::Stdin, 87 | stdout: tokio::io::Stdout, 88 | } 89 | 90 | impl Stdio { 91 | pub fn new() -> Self { 92 | Self { 93 | stdin: tokio::io::stdin(), 94 | stdout: tokio::io::stdout(), 95 | } 96 | } 97 | } 98 | 99 | impl AsyncRead for Stdio { 100 | fn poll_read( 101 | mut self: std::pin::Pin<&mut Self>, 102 | cx: &mut std::task::Context<'_>, 103 | buf: &mut tokio::io::ReadBuf<'_>, 104 | ) -> std::task::Poll> { 105 | std::pin::Pin::new(&mut self.stdin).poll_read(cx, buf) 106 | } 107 | } 108 | 109 | impl AsyncWrite for Stdio { 110 | fn 
poll_write( 111 | mut self: std::pin::Pin<&mut Self>, 112 | cx: &mut std::task::Context<'_>, 113 | buf: &[u8], 114 | ) -> std::task::Poll> { 115 | std::pin::Pin::new(&mut self.stdout).poll_write(cx, buf) 116 | } 117 | 118 | fn poll_flush( 119 | mut self: std::pin::Pin<&mut Self>, 120 | cx: &mut std::task::Context<'_>, 121 | ) -> std::task::Poll> { 122 | std::pin::Pin::new(&mut self.stdout).poll_flush(cx) 123 | } 124 | 125 | fn poll_shutdown( 126 | mut self: std::pin::Pin<&mut Self>, 127 | cx: &mut std::task::Context<'_>, 128 | ) -> std::task::Poll> { 129 | std::pin::Pin::new(&mut self.stdout).poll_shutdown(cx) 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /src/client/handle_remote/socks/mod.rs: -------------------------------------------------------------------------------- 1 | //! SOCKS server. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | mod v4; 6 | mod v5; 7 | 8 | use super::HandlerResources; 9 | use super::tcp::{open_tcp_listener, request_tcp_channel}; 10 | use crate::client::StreamCommand; 11 | use crate::config; 12 | use bytes::{Buf, Bytes}; 13 | use penguin_mux::{Datagram, Dupe}; 14 | use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; 15 | use std::sync::Arc; 16 | use tokio::io::{AsyncBufRead, BufReader}; 17 | use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite}; 18 | use tokio::net::UdpSocket; 19 | use tokio::sync::mpsc; 20 | use tokio::task::JoinSet; 21 | use tracing::{debug, info, trace, warn}; 22 | 23 | // Errors that can occur while handling a SOCKS request. 24 | #[derive(Debug, thiserror::Error)] 25 | pub enum Error { 26 | /// Error writing to the client that is 27 | /// not our fault. 
28 | #[error(transparent)] 29 | Write(#[from] std::io::Error), 30 | #[error("Client with version={0} is not SOCKSv4 or SOCKSv5")] 31 | SocksVersion(u8), 32 | #[error("Unsupported SOCKS command: {0}")] 33 | InvalidCommand(u8), 34 | #[error("Invalid SOCKS address type: {0}")] 35 | AddressType(u8), 36 | #[error("Cannot {0} in SOCKS request: {1}")] 37 | ProcessSocksRequest(&'static str, std::io::Error), 38 | #[error("Cannot parse SOCKS associate datagram")] 39 | ParseAssociate, 40 | #[error("Client does not support NOAUTH")] 41 | OtherAuth, 42 | /// Fatal error that we should propagate to main. 43 | #[error(transparent)] 44 | Fatal(#[from] super::FatalError), 45 | } 46 | 47 | pub(super) async fn handle_socks( 48 | lhost: &'static str, 49 | lport: u16, 50 | handler_resources: &'static HandlerResources, 51 | ) -> Result<(), super::FatalError> { 52 | // Failing to open the listener is a fatal error and should be propagated. 53 | let listener = open_tcp_listener(lhost, lport) 54 | .await 55 | .map_err(super::FatalError::ClientIo)?; 56 | let mut socks_jobs = JoinSet::new(); 57 | loop { 58 | tokio::select! { 59 | biased; 60 | Some(finished) = socks_jobs.join_next() => { 61 | if let Err(e) = finished.expect("SOCKS job panicked (this is a bug)") { 62 | if let Error::Fatal(e) = e { 63 | return Err(e); 64 | } 65 | info!("{e}"); 66 | } 67 | } 68 | result = listener.accept() => { 69 | // A failed accept() is a fatal error and should be propagated. 
70 | let (stream, _) = result.map_err(super::FatalError::ClientIo)?; 71 | socks_jobs.spawn(on_socks_accept(stream, lhost, handler_resources)); 72 | } 73 | } 74 | } 75 | } 76 | 77 | #[inline] 78 | pub(super) async fn handle_socks_stdio( 79 | handler_resources: &'static HandlerResources, 80 | ) -> Result<(), super::FatalError> { 81 | if let Err(e) = on_socks_accept(super::Stdio::new(), "localhost", handler_resources).await { 82 | if let Error::Fatal(e) = e { 83 | return Err(e); 84 | } 85 | info!("{e}"); 86 | } 87 | Ok(()) 88 | } 89 | 90 | /// Handle a SOCKS5 connection. 91 | /// Based on socksv5's example. 92 | /// We need to be able to request additional channels, so we need `handler_resources` 93 | #[tracing::instrument(skip(stream, handler_resources), level = "trace")] 94 | pub(super) async fn on_socks_accept( 95 | stream: RW, 96 | local_addr: &str, 97 | handler_resources: &'static HandlerResources, 98 | ) -> Result<(), Error> 99 | where 100 | RW: AsyncRead + AsyncWrite + Unpin, 101 | { 102 | let mut bufreader = BufReader::new(stream); 103 | let version = bufreader 104 | .read_u8() 105 | .await 106 | .map_err(|e| Error::ProcessSocksRequest("read version", e))?; 107 | match version { 108 | 4 => socks4(&mut bufreader, handler_resources).await, 109 | 5 => socks5(&mut bufreader, local_addr, handler_resources).await, 110 | version => Err(Error::SocksVersion(version)), 111 | } 112 | } 113 | 114 | #[inline] 115 | #[tracing::instrument(skip_all, fields(host, port, cmd))] 116 | async fn socks4(stream: &mut RW, handler_resources: &HandlerResources) -> Result<(), Error> 117 | where 118 | RW: AsyncBufRead + AsyncWrite + Unpin, 119 | { 120 | let (command, rhost, rport) = v4::read_request(stream).await?; 121 | tracing::Span::current().record("host", format_args!("{}", String::from_utf8_lossy(&rhost))); 122 | tracing::Span::current().record("port", rport); 123 | tracing::Span::current().record("cmd", command); 124 | debug!("SOCKSv4 request"); 125 | if command == 0x01 { 126 | // 
CONNECT 127 | // This fails only if main has exited, which is a fatal error. 128 | let stream_command_tx_permit = handler_resources 129 | .stream_command_tx 130 | .reserve() 131 | .await 132 | .or(Err(super::FatalError::RequestStream))?; 133 | handle_connect(stream, rhost, rport, stream_command_tx_permit, false).await 134 | } else { 135 | v4::write_response(stream, 0x5b).await?; 136 | Err(Error::InvalidCommand(command)) 137 | } 138 | } 139 | 140 | #[inline] 141 | #[tracing::instrument(skip_all, fields(host, port, cmd, local = %local_addr))] 142 | async fn socks5( 143 | stream: &mut RW, 144 | local_addr: &str, 145 | handler_resources: &'static HandlerResources, 146 | ) -> Result<(), Error> 147 | where 148 | RW: AsyncBufRead + AsyncWrite + Unpin, 149 | { 150 | // Complete the handshake 151 | let methods = v5::read_auth_methods(stream).await?; 152 | if !methods.contains(&0x00) { 153 | // Send back NO ACCEPTABLE METHODS 154 | // Note that we are not compliant with RFC 1928 here, as we MUST 155 | // support GSSAPI and SHOULD support USERNAME/PASSWORD 156 | v5::write_auth_method(stream, 0xff).await?; 157 | return Err(Error::OtherAuth); 158 | } 159 | // Send back NO AUTHENTICATION REQUIRED 160 | v5::write_auth_method(stream, 0x00).await?; 161 | // Read the request 162 | let (command, rhost, rport) = v5::read_request(stream).await?; 163 | tracing::Span::current().record("host", format_args!("{}", String::from_utf8_lossy(&rhost))); 164 | tracing::Span::current().record("port", rport); 165 | tracing::Span::current().record("cmd", command); 166 | debug!("SOCKSv5 request"); 167 | match command { 168 | 0x01 => { 169 | // CONNECT 170 | // This fails only if main has exited, which is a fatal error. 
171 | let stream_command_tx_permit = handler_resources 172 | .stream_command_tx 173 | .reserve() 174 | .await 175 | .or(Err(super::FatalError::RequestStream))?; 176 | handle_connect(stream, rhost, rport, stream_command_tx_permit, true).await 177 | } 178 | // UDP ASSOCIATE 179 | 0x03 => handle_associate(stream, local_addr, handler_resources).await, 180 | // We don't support BIND because I can't ask the remote host to bind 181 | _ => { 182 | v5::write_response_unspecified(stream, 0x07).await?; 183 | Err(Error::InvalidCommand(command)) 184 | } 185 | } 186 | } 187 | 188 | #[tracing::instrument(skip_all, level = "trace")] 189 | async fn handle_connect( 190 | stream: &mut RW, 191 | rhost: Bytes, 192 | rport: u16, 193 | stream_command_tx_permit: mpsc::Permit<'_, StreamCommand>, 194 | version_is_5: bool, 195 | ) -> Result<(), Error> 196 | where 197 | RW: AsyncBufRead + AsyncWrite + Unpin, 198 | { 199 | // Establish a connection to the remote host 200 | let channel = request_tcp_channel(stream_command_tx_permit, rhost, rport) 201 | .await 202 | .or(Err(super::FatalError::MainLoopExitWithoutSendingStream))?; 203 | // Send back a successful response 204 | if version_is_5 { 205 | v5::write_response_unspecified(stream, 0x00).await?; 206 | } else { 207 | v4::write_response(stream, 0x5a).await?; 208 | } 209 | trace!("SOCKS starting copy"); 210 | channel.into_copy_bidirectional_with_buf(stream).await?; 211 | Ok(()) 212 | } 213 | 214 | #[tracing::instrument(skip_all, level = "trace")] 215 | async fn handle_associate( 216 | stream: &mut RW, 217 | local_addr: &str, 218 | handler_resources: &'static HandlerResources, 219 | ) -> Result<(), Error> 220 | where 221 | RW: AsyncRead + AsyncWrite + Unpin, 222 | { 223 | let socket = match UdpSocket::bind((local_addr, 0)).await { 224 | Ok(s) => s, 225 | Err(e) => { 226 | v5::write_response_unspecified(stream, 0x01).await?; 227 | return Err(Error::ProcessSocksRequest("bind udp socket", e)); 228 | } 229 | }; 230 | let sock_local_addr = match 
socket.local_addr() { 231 | Ok(a) => a, 232 | Err(e) => { 233 | v5::write_response_unspecified(stream, 0x01).await?; 234 | return Err(Error::ProcessSocksRequest("get udp socket local addr", e)); 235 | } 236 | }; 237 | trace!("SOCKS relaying at {sock_local_addr}"); 238 | let relay_task = tokio::spawn(udp_relay(handler_resources, socket)); 239 | // Send back a successful response 240 | v5::write_response(stream, 0x00, sock_local_addr).await?; 241 | // My crude way to detect when the client closes the connection 242 | // I cannot pass a zero-length buffer to read_exact, because so it 243 | // skips `poll_read` and just returns 244 | stream.read_exact(&mut [0; 1]).await.ok(); 245 | relay_task.abort(); 246 | Ok(()) 247 | } 248 | 249 | /// UDP task spawned by the TCP connection 250 | #[tracing::instrument(skip_all, level = "trace")] 251 | async fn udp_relay(handler_resources: &HandlerResources, socket: UdpSocket) -> Result<(), Error> { 252 | let socket = Arc::new(socket); 253 | loop { 254 | let Some((target_host, target_port, data, src, sport)) = 255 | handle_udp_relay_header(&socket).await? 256 | else { 257 | continue; 258 | }; 259 | let client_id = handler_resources.add_udp_client((src, sport).into(), socket.dupe(), true); 260 | let datagram_frame = Datagram { 261 | target_host, 262 | target_port, 263 | flow_id: client_id, 264 | data, 265 | }; 266 | // This fails only if main has exited, which is a fatal error. 267 | handler_resources 268 | .datagram_tx 269 | .send(datagram_frame) 270 | .await 271 | .or(Err(super::FatalError::SendDatagram))?; 272 | } 273 | } 274 | 275 | /// Parse a UDP relay request. 
276 | /// Returns (dst, dport, data, src, sport) 277 | async fn handle_udp_relay_header( 278 | socket: &UdpSocket, 279 | ) -> Result, Error> { 280 | let mut buf = vec![0; config::MAX_UDP_PACKET_SIZE]; 281 | let (len, addr) = socket.recv_from(&mut buf).await?; 282 | trace!("received {len} bytes from {addr}"); 283 | buf.truncate(len); 284 | let mut buf = Bytes::from(buf); 285 | if buf.remaining() < 4 { 286 | return Err(Error::ParseAssociate); 287 | } 288 | let _reserved = buf.get_u16(); 289 | let frag = buf.get_u8(); 290 | if frag != 0 { 291 | warn!("Fragmented UDP packets are not implemented"); 292 | return Ok(None); 293 | } 294 | let atyp = buf.get_u8(); 295 | let (dst, port) = match atyp { 296 | 0x01 => { 297 | // IPv4 298 | if buf.remaining() < 6 { 299 | return Err(Error::ParseAssociate); 300 | } 301 | let addr = buf.get_u32(); 302 | let dst = Ipv4Addr::from(addr).to_string(); 303 | let port = buf.get_u16(); 304 | (dst.into(), port) 305 | } 306 | 0x03 => { 307 | // Domain name 308 | if buf.remaining() < 1 { 309 | return Err(Error::ParseAssociate); 310 | } 311 | let len = usize::from(buf.get_u8()); 312 | if buf.remaining() < len + 2 { 313 | return Err(Error::ParseAssociate); 314 | } 315 | let dst = buf.split_to(len); 316 | let port = buf.get_u16(); 317 | (dst, port) 318 | } 319 | 0x04 => { 320 | // IPv6 321 | if buf.remaining() < 18 { 322 | return Err(Error::ParseAssociate); 323 | } 324 | let addr = buf.get_u128(); 325 | let dst = Ipv6Addr::from(addr).to_string(); 326 | let port = buf.get_u16(); 327 | (dst.into(), port) 328 | } 329 | _ => { 330 | warn!("Dropping datagram with invalid address type {atyp}"); 331 | return Ok(None); 332 | } 333 | }; 334 | trace!("Parsed packet: dst {dst:?} port {port}"); 335 | Ok(Some((dst, port, buf, addr.ip(), addr.port()))) 336 | } 337 | 338 | /// Send a UDP relay response 339 | pub async fn send_udp_relay_response( 340 | socket: &UdpSocket, 341 | target: SocketAddr, 342 | data: &[u8], 343 | ) -> std::io::Result { 344 | // Write 
the header 345 | let mut content = vec![0; 3]; 346 | match target.ip() { 347 | IpAddr::V4(ip) => { 348 | content.extend(ip.octets()); 349 | content.extend([0x01]); 350 | } 351 | IpAddr::V6(ip) => { 352 | content.extend(ip.octets()); 353 | content.extend([0x04]); 354 | } 355 | } 356 | content.extend(&target.port().to_be_bytes()); 357 | content.extend(data); 358 | socket.send_to(&content, target).await 359 | } 360 | -------------------------------------------------------------------------------- /src/client/handle_remote/socks/v4.rs: -------------------------------------------------------------------------------- 1 | //! SOCKS4/a server helpers. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use super::Error; 6 | use bytes::Bytes; 7 | use std::net::Ipv4Addr; 8 | use tokio::io::{AsyncBufRead, AsyncBufReadExt, AsyncReadExt, AsyncWrite, AsyncWriteExt}; 9 | use tracing::trace; 10 | 11 | /// Read a SOCKS4/a request from the given reader. Returns the command, address, and port. 12 | /// Writes an `Address type not supported` response to the given writer if the address type is not 13 | /// valid. We expect the version byte to have already been read. 14 | /// 15 | /// # Errors 16 | /// Underlying I/O error with a description of the context. 
17 | #[inline] 18 | pub async fn read_request(reader: &mut R) -> Result<(u8, Bytes, u16), Error> 19 | where 20 | R: AsyncBufRead + Unpin, 21 | { 22 | let command = reader 23 | .read_u8() 24 | .await 25 | .map_err(|e| Error::ProcessSocksRequest("read command", e))?; 26 | let rport = reader 27 | .read_u16() 28 | .await 29 | .map_err(|e| Error::ProcessSocksRequest("read port", e))?; 30 | let ip = reader 31 | .read_u32() 32 | .await 33 | .map_err(|e| Error::ProcessSocksRequest("read ip", e))?; 34 | let mut user_id = Vec::new(); 35 | reader 36 | .read_until(0, &mut user_id) 37 | .await 38 | .map_err(|e| Error::ProcessSocksRequest("read user id", e))?; 39 | // Remove the null byte 40 | user_id.pop(); 41 | trace!("User ID: {user_id:?}"); 42 | let rhost = if ip >> 24 == 0 { 43 | let mut domain = Vec::new(); 44 | reader 45 | .read_until(0, &mut domain) 46 | .await 47 | .map_err(|e| Error::ProcessSocksRequest("read domain", e))?; 48 | // Remove the null byte 49 | domain.pop(); 50 | Bytes::from(domain) 51 | } else { 52 | Ipv4Addr::from(ip).to_string().into() 53 | }; 54 | Ok((command, rhost, rport)) 55 | } 56 | 57 | /// Write a SOCKS4/a response to the given writer. 58 | /// 59 | /// # Errors 60 | /// Underlying I/O error with a description of the context. 
61 | #[inline] 62 | pub async fn write_response(writer: &mut W, response: u8) -> Result<(), Error> 63 | where 64 | W: AsyncWrite + Unpin, 65 | { 66 | let buf = [0x00, response, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]; 67 | writer 68 | .write_all(&buf) 69 | .await 70 | .map_err(|e| Error::ProcessSocksRequest("write response", e))?; 71 | writer 72 | .flush() 73 | .await 74 | .map_err(|e| Error::ProcessSocksRequest("flush", e))?; 75 | Ok(()) 76 | } 77 | 78 | #[cfg(test)] 79 | mod tests { 80 | use super::*; 81 | use std::io::Cursor; 82 | 83 | #[tokio::test] 84 | async fn test_read_request_ip() { 85 | crate::tests::setup_logging(); 86 | let mut reader = Cursor::new([0x01, 0x00, 0x50, 0x7f, 0x00, 0x00, 0x01, 0x61, 0x00]); 87 | let (command, rhost, rport) = read_request(&mut reader).await.unwrap(); 88 | assert_eq!(command, 0x01); 89 | assert_eq!(rhost, "127.0.0.1"); 90 | assert_eq!(rport, 0x50); 91 | } 92 | 93 | #[tokio::test] 94 | async fn test_read_request_domain() { 95 | crate::tests::setup_logging(); 96 | let mut reader = Cursor::new([ 97 | 0x01, 0x00, 0x50, 0x00, 0x00, 0x00, 0x01, 0x61, 0x00, 0x77, 0x77, 0x77, 0x2e, 0x65, 98 | 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x00, 99 | ]); 100 | let (command, rhost, rport) = read_request(&mut reader).await.unwrap(); 101 | assert_eq!(command, 0x01); 102 | assert_eq!(rhost, "www.example.com"); 103 | assert_eq!(rport, 0x50); 104 | } 105 | 106 | #[tokio::test] 107 | async fn test_write_response() { 108 | crate::tests::setup_logging(); 109 | let mut writer = Cursor::new(Vec::new()); 110 | write_response(&mut writer, 0x5a).await.unwrap(); 111 | assert_eq!( 112 | writer.get_ref(), 113 | &[0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] 114 | ); 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/client/handle_remote/socks/v5.rs: -------------------------------------------------------------------------------- 1 | //! SOCKS5 server helpers (RFC 1928). 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use std::net::SocketAddr; 6 | 7 | use super::Error; 8 | use bytes::Bytes; 9 | use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; 10 | use tracing::trace; 11 | 12 | /// Read SOCKS authentication methods from the given reader. 13 | /// 14 | /// # Errors 15 | /// Underlying I/O error with a description of the context. 16 | #[inline] 17 | pub async fn read_auth_methods(reader: &mut R) -> Result, Error> 18 | where 19 | R: AsyncRead + Unpin, 20 | { 21 | let num_methods = reader 22 | .read_u8() 23 | .await 24 | .map_err(|e| Error::ProcessSocksRequest("read number of methods", e))?; 25 | let mut methods = vec![0; usize::from(num_methods)]; 26 | reader 27 | .read_exact(&mut methods) 28 | .await 29 | .map_err(|e| Error::ProcessSocksRequest("read methods", e))?; 30 | Ok(methods) 31 | } 32 | 33 | /// Write a SOCKS5 authentication method selection to the given writer. 34 | /// 35 | /// # Errors 36 | /// Underlying I/O error with a description of the context. 37 | #[inline] 38 | pub async fn write_auth_method(writer: &mut W, method: u8) -> Result<(), Error> 39 | where 40 | W: AsyncWrite + Unpin, 41 | { 42 | writer 43 | .write_all(&[0x05, method]) 44 | .await 45 | .map_err(|e| Error::ProcessSocksRequest("write auth method", e))?; 46 | writer 47 | .flush() 48 | .await 49 | .map_err(|e| Error::ProcessSocksRequest("flush", e))?; 50 | Ok(()) 51 | } 52 | 53 | /// Read a SOCKS5 request from the given reader. Returns the command, address, and port. 54 | /// Writes an `Address type not supported` response to the given writer if the address type is not 55 | /// valid. 56 | /// 57 | /// # Errors 58 | /// Underlying I/O error with a description of the context. 
59 | #[inline] 60 | pub async fn read_request(stream: &mut RW) -> Result<(u8, Bytes, u16), Error> 61 | where 62 | RW: AsyncRead + AsyncWrite + Unpin, 63 | { 64 | let version = stream 65 | .read_u8() 66 | .await 67 | .map_err(|e| Error::ProcessSocksRequest("read version", e))?; 68 | if version != 0x05 { 69 | return Err(Error::SocksVersion(version)); 70 | } 71 | let command = stream 72 | .read_u8() 73 | .await 74 | .map_err(|e| Error::ProcessSocksRequest("read command", e))?; 75 | let _reserved = stream 76 | .read_u8() 77 | .await 78 | .map_err(|e| Error::ProcessSocksRequest("read reserved", e))?; 79 | let address = read_address(stream).await?; 80 | let port = stream 81 | .read_u16() 82 | .await 83 | .map_err(|e| Error::ProcessSocksRequest("read port", e))?; 84 | Ok((command, address, port)) 85 | } 86 | 87 | /// Read a SOCKS5 address from the given reader. 88 | /// 89 | /// # Errors 90 | /// Underlying I/O error with a description of the context. 91 | #[inline] 92 | async fn read_address(stream: &mut RW) -> Result 93 | where 94 | RW: AsyncRead + AsyncWrite + Unpin, 95 | { 96 | let address_type = stream 97 | .read_u8() 98 | .await 99 | .map_err(|e| Error::ProcessSocksRequest("read address type", e))?; 100 | trace!("address type: {address_type}"); 101 | match address_type { 102 | 0x01 => { 103 | // IPv4 104 | let mut addr = [0; 4]; 105 | stream 106 | .read_exact(&mut addr) 107 | .await 108 | .map_err(|e| Error::ProcessSocksRequest("read address", e))?; 109 | Ok(std::net::Ipv4Addr::from(addr).to_string().into()) 110 | } 111 | 0x03 => { 112 | // Domain name 113 | let len = stream 114 | .read_u8() 115 | .await 116 | .map_err(|e| Error::ProcessSocksRequest("read domain length", e))?; 117 | let mut addr = vec![0; usize::from(len)]; 118 | stream 119 | .read_exact(&mut addr) 120 | .await 121 | .map_err(|e| Error::ProcessSocksRequest("read domain address", e))?; 122 | Ok(Bytes::from(addr)) 123 | } 124 | 0x04 => { 125 | // IPv6 126 | let mut addr = [0; 16]; 127 | stream 128 | 
.read_exact(&mut addr) 129 | .await 130 | .map_err(|e| Error::ProcessSocksRequest("read address", e))?; 131 | Ok(std::net::Ipv6Addr::from(addr).to_string().into()) 132 | } 133 | _ => { 134 | // Unsupported address type 135 | stream 136 | .write_all(&[0x05, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]) 137 | .await 138 | .map_err(|e| Error::ProcessSocksRequest("write unsupported address type", e))?; 139 | stream 140 | .flush() 141 | .await 142 | .map_err(|e| Error::ProcessSocksRequest("flush", e))?; 143 | Err(Error::AddressType(address_type)) 144 | } 145 | } 146 | } 147 | 148 | /// Write a SOCKS5 response to the given writer. 149 | /// 150 | /// # Errors 151 | /// Underlying I/O error with a description of the context. 152 | #[inline] 153 | pub async fn write_response(writer: &mut W, response: u8, local: SocketAddr) -> Result<(), Error> 154 | where 155 | W: AsyncWrite + Unpin, 156 | { 157 | let mut buf = match local { 158 | SocketAddr::V4(addr) => { 159 | // 4 bytes header + 4 bytes address + 2 bytes port 160 | let total_len = 4 + 4 + 2; 161 | let mut buf = vec![0; total_len]; 162 | buf[3] = 0x01; // address type 163 | buf[4..8].copy_from_slice(&addr.ip().octets()); 164 | buf 165 | } 166 | SocketAddr::V6(addr) => { 167 | // 4 bytes header + 16 bytes address + 2 bytes port 168 | let total_len = 4 + 16 + 2; 169 | let mut buf = vec![0; total_len]; 170 | buf[3] = 0x04; // address type 171 | buf[4..20].copy_from_slice(&addr.ip().octets()); 172 | buf 173 | } 174 | }; 175 | buf[0] = 0x05; // version 176 | buf[1] = response; // response code 177 | buf[2] = 0x00; // reserved 178 | let port = local.port(); 179 | let len = buf.len(); 180 | buf[len - 2..len].copy_from_slice(&port.to_be_bytes()); 181 | writer 182 | .write_all(&buf) 183 | .await 184 | .map_err(|e| Error::ProcessSocksRequest("write response", e))?; 185 | writer 186 | .flush() 187 | .await 188 | .map_err(|e| Error::ProcessSocksRequest("flush", e))?; 189 | Ok(()) 190 | } 191 | 192 | /// Write a failed 
response with an unspecified BIND address. 193 | /// 194 | /// # Errors 195 | /// Underlying I/O error with a description of the context. 196 | #[inline] 197 | pub async fn write_response_unspecified(writer: &mut W, response: u8) -> Result<(), Error> 198 | where 199 | W: AsyncWrite + Unpin, 200 | { 201 | writer 202 | .write_all(&[ 203 | 0x05, response, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 204 | ]) 205 | .await 206 | .map_err(|e| Error::ProcessSocksRequest("write response", e))?; 207 | writer 208 | .flush() 209 | .await 210 | .map_err(|e| Error::ProcessSocksRequest("flush", e))?; 211 | Ok(()) 212 | } 213 | 214 | #[cfg(test)] 215 | mod tests { 216 | use super::*; 217 | use std::io::Cursor; 218 | 219 | #[tokio::test] 220 | async fn test_read_auth_methods() { 221 | crate::tests::setup_logging(); 222 | let mut reader = Cursor::new(vec![0x02, 0x00, 0x01]); 223 | let methods = read_auth_methods(&mut reader).await.unwrap(); 224 | assert_eq!(methods, vec![0x00, 0x01]); 225 | } 226 | 227 | #[tokio::test] 228 | async fn test_write_auth_method() { 229 | crate::tests::setup_logging(); 230 | let mut writer = Cursor::new(vec![]); 231 | write_auth_method(&mut writer, 0x00).await.unwrap(); 232 | assert_eq!(writer.get_ref(), &[0x05, 0x00]); 233 | } 234 | 235 | #[tokio::test] 236 | async fn test_read_request_v4() { 237 | crate::tests::setup_logging(); 238 | let mut reader = Cursor::new(vec![ 239 | 0x05, 0x01, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x50, 240 | ]); 241 | let (command, address, port) = read_request(&mut reader).await.unwrap(); 242 | assert_eq!(command, 0x01); 243 | assert_eq!(address, "127.0.0.1"); 244 | assert_eq!(port, 0x50); 245 | } 246 | 247 | #[tokio::test] 248 | async fn test_read_request_v6() { 249 | crate::tests::setup_logging(); 250 | let mut reader = Cursor::new(vec![ 251 | 0x05, 0x01, 0x00, 0x04, 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 252 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x50, 253 | ]); 254 | let (command, 
address, port) = read_request(&mut reader).await.unwrap(); 255 | assert_eq!(command, 0x01); 256 | assert_eq!(address, "2001:db8::1"); 257 | assert_eq!(port, 0x50); 258 | } 259 | 260 | #[tokio::test] 261 | async fn test_read_request_domain() { 262 | crate::tests::setup_logging(); 263 | let mut reader = Cursor::new(vec![ 264 | 0x05, 0x01, 0x00, 0x03, 0x0f, 0x77, 0x77, 0x77, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 265 | 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x00, 0x50, 266 | ]); 267 | let (command, address, port) = read_request(&mut reader).await.unwrap(); 268 | assert_eq!(command, 0x01); 269 | assert_eq!(address, "www.example.com"); 270 | assert_eq!(port, 0x50); 271 | } 272 | 273 | #[tokio::test] 274 | async fn test_read_request_invalid_address_type() { 275 | crate::tests::setup_logging(); 276 | let mut reader = Cursor::new(vec![0x05, 0x01, 0x00, 0x02, 0x00, 0x50]); 277 | read_request(&mut reader).await.unwrap_err(); 278 | } 279 | 280 | #[tokio::test] 281 | async fn test_write_response() { 282 | crate::tests::setup_logging(); 283 | let mut writer = Cursor::new(vec![]); 284 | write_response(&mut writer, 0x00, ([127, 0, 0, 1], 80).into()) 285 | .await 286 | .unwrap(); 287 | assert_eq!( 288 | writer.get_ref(), 289 | &[0x05, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x50] 290 | ); 291 | } 292 | 293 | #[tokio::test] 294 | async fn test_write_response_unspecified() { 295 | crate::tests::setup_logging(); 296 | let mut writer = Cursor::new(vec![]); 297 | write_response_unspecified(&mut writer, 0x00).await.unwrap(); 298 | assert_eq!( 299 | writer.get_ref(), 300 | &[0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] 301 | ); 302 | } 303 | } 304 | -------------------------------------------------------------------------------- /src/client/handle_remote/tcp.rs: -------------------------------------------------------------------------------- 1 | //! Run a remote TCP connection. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use super::super::MaybeRetryableError; 6 | use super::FatalError; 7 | use crate::client::HandlerResources; 8 | use crate::client::{MuxStream, StreamCommand}; 9 | use bytes::Bytes; 10 | use tokio::{ 11 | net::TcpListener, 12 | sync::{mpsc, oneshot}, 13 | }; 14 | use tracing::{error, info, warn}; 15 | 16 | /// Request a channel from the mux 17 | /// Returns an error if the main loop timed out waiting for a response. 18 | #[inline] 19 | #[tracing::instrument(skip(stream_command_tx_permit), level = "debug")] 20 | pub(super) async fn request_tcp_channel( 21 | stream_command_tx_permit: mpsc::Permit<'_, StreamCommand>, 22 | dest_host: Bytes, 23 | dest_port: u16, 24 | ) -> Result { 25 | let (tx, rx) = oneshot::channel(); 26 | let stream_request = StreamCommand { 27 | tx, 28 | host: dest_host, 29 | port: dest_port, 30 | }; 31 | stream_command_tx_permit.send(stream_request); 32 | rx.await 33 | } 34 | 35 | /// Open a TCP listener. 36 | #[inline] 37 | #[tracing::instrument(level = "trace")] 38 | pub(super) async fn open_tcp_listener(lhost: &str, lport: u16) -> std::io::Result { 39 | let listener = TcpListener::bind((lhost, lport)).await?; 40 | // `expect`: at this point `listener` should be bound. Otherwise, it's a bug. 41 | let local_addr = listener 42 | .local_addr() 43 | .expect("Failed to get local address of TCP listener (this is a bug)"); 44 | info!("Listening on {local_addr}"); 45 | Ok(listener) 46 | } 47 | 48 | /// Handle a TCP Inet->Inet remote. 49 | #[inline] 50 | #[tracing::instrument(skip(handler_resources), level = "debug")] 51 | pub(super) async fn handle_tcp( 52 | lhost: &str, 53 | lport: u16, 54 | rhost: &'static str, 55 | rport: u16, 56 | handler_resources: &HandlerResources, 57 | ) -> Result<(), FatalError> { 58 | // Not being able to open a TCP listener is a fatal error. 
59 | let listener = open_tcp_listener(lhost, lport) 60 | .await 61 | .map_err(FatalError::ClientIo)?; 62 | let rhost = rhost.as_bytes(); 63 | loop { 64 | // This fails only if main has exited, which is a fatal error. 65 | let stream_command_tx_permit = handler_resources 66 | .stream_command_tx 67 | .reserve() 68 | .await 69 | .or(Err(FatalError::RequestStream))?; 70 | // Only `accept` when we have a permit to send a request. 71 | // This way, the backpressure is propagated to the TCP listener. 72 | // Not being able to accept a TCP connection is a fatal error. 73 | let (mut tcp_stream, _) = listener.accept().await.map_err(FatalError::ClientIo)?; 74 | // A new channel is created for each incoming TCP connection. 75 | // It's already TCP, anyways. 76 | let channel = 77 | request_tcp_channel(stream_command_tx_permit, Bytes::from_static(rhost), rport) 78 | .await 79 | .or(Err(FatalError::MainLoopExitWithoutSendingStream))?; 80 | // Transient errors in the forwarder don't matter. 81 | tokio::spawn(async move { 82 | if let Err(error) = channel.into_copy_bidirectional(&mut tcp_stream).await { 83 | warn!("TCP forwarder failed: {error}"); 84 | } 85 | }); 86 | } 87 | } 88 | 89 | /// Handle a TCP Stdio->Inet remote. 90 | #[tracing::instrument(skip(handler_resources))] 91 | pub(super) async fn handle_tcp_stdio( 92 | rhost: &'static str, 93 | rport: u16, 94 | handler_resources: &HandlerResources, 95 | ) -> Result<(), FatalError> { 96 | let mut stdio = super::Stdio::new(); 97 | let rhost = rhost.as_bytes(); 98 | // We want `loop` to be able to continue after a connection failure 99 | loop { 100 | // This fails only if main has exited, which is a fatal error. 
101 | let stream_command_tx_permit = handler_resources 102 | .stream_command_tx 103 | .reserve() 104 | .await 105 | .or(Err(FatalError::RequestStream))?; 106 | let channel = 107 | request_tcp_channel(stream_command_tx_permit, Bytes::from_static(rhost), rport) 108 | .await 109 | .or(Err(FatalError::MainLoopExitWithoutSendingStream))?; 110 | match channel.into_copy_bidirectional(&mut stdio).await { 111 | Ok(_) => { 112 | info!("TCP stdio connection closed"); 113 | break Ok(()); 114 | } 115 | Err(error) if error.retryable() => { 116 | warn!("TCP stdio connection failed: {error}"); 117 | } 118 | Err(error) => { 119 | error!("TCP stdio connection failed: {error}"); 120 | break Err(FatalError::ClientIo(error)); 121 | } 122 | } 123 | } 124 | } 125 | 126 | #[cfg(test)] 127 | mod tests { 128 | use super::*; 129 | use tokio::io::AsyncWriteExt; 130 | 131 | #[tokio::test] 132 | async fn test_open_tcp_listener() { 133 | crate::tests::setup_logging(); 134 | let listener = open_tcp_listener("127.0.0.1", 0).await.unwrap(); 135 | let local_addr = listener.local_addr().unwrap(); 136 | assert_eq!(local_addr.ip(), std::net::Ipv4Addr::LOCALHOST); 137 | let accept_task = tokio::spawn(async move { 138 | let (mut stream, _) = listener.accept().await.unwrap(); 139 | stream.shutdown().await.unwrap(); 140 | }); 141 | let mut stream = tokio::net::TcpStream::connect(local_addr).await.unwrap(); 142 | stream.shutdown().await.unwrap(); 143 | accept_task.await.unwrap(); 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/client/handle_remote/udp.rs: -------------------------------------------------------------------------------- 1 | //! Run a remote UDP connection. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use super::FatalError; 6 | use crate::client::HandlerResources; 7 | use crate::config; 8 | use bytes::Bytes; 9 | use penguin_mux::{Datagram, Dupe}; 10 | use std::sync::Arc; 11 | use tokio::io::{AsyncBufReadExt, BufReader}; 12 | use tokio::net::UdpSocket; 13 | use tracing::{info, trace}; 14 | 15 | /// Handle a UDP Inet->Inet remote. 16 | #[inline] 17 | #[tracing::instrument(skip(handler_resources), level = "debug")] 18 | pub(super) async fn handle_udp( 19 | lhost: &'static str, 20 | lport: u16, 21 | rhost: &'static str, 22 | rport: u16, 23 | handler_resources: &HandlerResources, 24 | ) -> Result<(), FatalError> { 25 | // Not being able to bind to the local port is a fatal error. 26 | let socket = UdpSocket::bind((lhost, lport)) 27 | .await 28 | .map_err(FatalError::ClientIo)?; 29 | let socket = Arc::new(socket); 30 | // `expect`: at this point `listener` should be bound. Otherwise, it's a bug. 31 | let local_addr = socket 32 | .local_addr() 33 | .expect("Failed to get local address of UDP socket (this is a bug)"); 34 | info!("Bound on {local_addr}"); 35 | loop { 36 | let mut buf = vec![0; config::MAX_UDP_PACKET_SIZE]; 37 | // `recv_from` can fail if the socket is closed, which is a fatal error. 38 | let (len, addr) = socket 39 | .recv_from(&mut buf) 40 | .await 41 | .map_err(FatalError::ClientIo)?; 42 | buf.truncate(len); 43 | trace!("received {len} bytes from {addr}"); 44 | let client_id = handler_resources.add_udp_client(addr, socket.dupe(), false); 45 | let frame = Datagram { 46 | target_host: Bytes::from(rhost), 47 | target_port: rport, 48 | flow_id: client_id, 49 | data: Bytes::from(buf), 50 | }; 51 | // This fails only if main has exited, which is a fatal error. 52 | handler_resources 53 | .datagram_tx 54 | .send(frame) 55 | .await 56 | .or(Err(FatalError::SendDatagram))?; 57 | } 58 | } 59 | 60 | /// Handle a UDP Stdio->Inet remote. 
61 | #[inline] 62 | #[tracing::instrument(skip(handler_resources), level = "debug")] 63 | pub(super) async fn handle_udp_stdio( 64 | rhost: &'static str, 65 | rport: u16, 66 | handler_resources: &HandlerResources, 67 | ) -> Result<(), FatalError> { 68 | let mut stdin = BufReader::new(tokio::io::stdin()); 69 | loop { 70 | let mut line = String::new(); 71 | // We should stop if we fail to read from stdin. 72 | stdin 73 | .read_line(&mut line) 74 | .await 75 | .map_err(FatalError::ClientIo)?; 76 | let frame = Datagram { 77 | target_host: Bytes::from_static(rhost.as_bytes()), 78 | target_port: rport, 79 | flow_id: 0, 80 | data: Bytes::from(line), 81 | }; 82 | // This fails only if main has exited, which is a fatal error. 83 | handler_resources 84 | .datagram_tx 85 | .send(frame) 86 | .await 87 | .or(Err(FatalError::SendDatagram))?; 88 | } 89 | } 90 | 91 | #[cfg(test)] 92 | mod tests { 93 | use super::*; 94 | use crate::client::ClientIdMaps; 95 | use parking_lot::RwLock; 96 | 97 | #[tokio::test] 98 | async fn test_handle_udp() { 99 | static LHOST: &str = "127.0.0.1"; 100 | static RHOST: &str = "127.0.0.1"; 101 | crate::tests::setup_logging(); 102 | let (datagram_tx, mut datagram_rx) = tokio::sync::mpsc::channel(1); 103 | let (stream_command_tx, _) = tokio::sync::mpsc::channel(1); 104 | let udp_client_map = Arc::new(RwLock::new(ClientIdMaps::new())); 105 | let handler_resources = HandlerResources { 106 | datagram_tx, 107 | stream_command_tx, 108 | udp_client_map: udp_client_map.dupe(), 109 | }; 110 | let forwarding_task = 111 | tokio::spawn( 112 | async move { handle_udp(LHOST, 14196, RHOST, 255, &handler_resources).await }, 113 | ); 114 | let socket = UdpSocket::bind("127.0.0.1:0").await.unwrap(); 115 | let local_addr = socket.local_addr().unwrap(); 116 | socket.connect("127.0.0.1:14196").await.unwrap(); 117 | socket.send(b"hello").await.unwrap(); 118 | let frame = datagram_rx.recv().await.unwrap(); 119 | assert_eq!(frame.target_host, RHOST.as_bytes()); 120 | 
assert_eq!(frame.target_port, 255); 121 | assert_eq!(*frame.data, *b"hello"); 122 | let client_id = *udp_client_map 123 | .read() 124 | .client_addr_map 125 | .get(&(local_addr, ([127, 0, 0, 1], 14196).into())) 126 | .unwrap(); 127 | assert_eq!(frame.flow_id, client_id); 128 | forwarding_task.abort(); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/client/maybe_retryable.rs: -------------------------------------------------------------------------------- 1 | //! Identify whether an error is fatal or retryable. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | pub(super) trait MaybeRetryableError: std::error::Error { 6 | /// Returns true if we should retry the connection. 7 | fn retryable(&self) -> bool; 8 | } 9 | 10 | impl MaybeRetryableError for std::io::Error { 11 | fn retryable(&self) -> bool { 12 | self.kind() == std::io::ErrorKind::AddrNotAvailable 13 | || self.kind() == std::io::ErrorKind::BrokenPipe 14 | || self.kind() == std::io::ErrorKind::ConnectionRefused 15 | || self.kind() == std::io::ErrorKind::ConnectionReset 16 | || self.kind() == std::io::ErrorKind::HostUnreachable 17 | || self.kind() == std::io::ErrorKind::NetworkUnreachable 18 | || self.kind() == std::io::ErrorKind::ConnectionAborted 19 | || self.kind() == std::io::ErrorKind::NotConnected 20 | || self.kind() == std::io::ErrorKind::AddrNotAvailable 21 | || self.kind() == std::io::ErrorKind::NetworkDown 22 | || self.kind() == std::io::ErrorKind::TimedOut 23 | || self.kind() == std::io::ErrorKind::UnexpectedEof 24 | } 25 | } 26 | 27 | impl MaybeRetryableError for tokio_tungstenite::tungstenite::error::ProtocolError { 28 | fn retryable(&self) -> bool { 29 | matches!( 30 | self, 31 | Self::ReceivedAfterClosing 32 | | Self::ResetWithoutClosingHandshake 33 | | Self::SendAfterClosing 34 | // Often happens in errorneous network conditions. 
35 | | Self::HandshakeIncomplete 36 | ) 37 | } 38 | } 39 | 40 | impl MaybeRetryableError for tokio_tungstenite::tungstenite::Error { 41 | fn retryable(&self) -> bool { 42 | match self { 43 | Self::Io(e) => e.retryable(), 44 | // `tungstenite` says that `AlreadyClosed` 45 | // "indicates your code tries to operate on the connection when it really 46 | // shouldn't anymore, so this really indicates a programmer error on your part." 47 | // But I really don't care about its difference with `ConnectionClosed` 48 | // because I dislike another indicator variable for closing. 49 | Self::AlreadyClosed | Self::ConnectionClosed => true, 50 | Self::Protocol(e) => e.retryable(), 51 | _ => false, 52 | } 53 | } 54 | } 55 | 56 | impl MaybeRetryableError for penguin_mux::Error { 57 | fn retryable(&self) -> bool { 58 | match self { 59 | Self::SendStreamToClient | Self::Closed => true, 60 | #[cfg(feature = "tungstenite")] 61 | Self::WebSocket(e) => e 62 | .downcast_ref::() 63 | .is_some_and(MaybeRetryableError::retryable), 64 | _ => false, 65 | } 66 | } 67 | } 68 | 69 | impl MaybeRetryableError for crate::tls::Error { 70 | fn retryable(&self) -> bool { 71 | match self { 72 | Self::TcpConnect(e) => e.retryable(), 73 | _ => false, 74 | } 75 | } 76 | } 77 | 78 | impl MaybeRetryableError for super::Error { 79 | fn retryable(&self) -> bool { 80 | match self { 81 | Self::Tungstenite(e) => e.retryable(), 82 | Self::TcpConnect(e) => e.retryable(), 83 | Self::Tls(e) => e.retryable(), 84 | Self::Mux(e) => e.retryable(), 85 | Self::HandshakeTimeout | Self::StreamRequestTimeout | Self::RemoteDisconnected => true, 86 | _ => false, 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/client/ws_connect.rs: -------------------------------------------------------------------------------- 1 | //! `WebSocket` connection. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use crate::arg::ClientArgs; 6 | use crate::tls::{MaybeTlsStream, tls_connect}; 7 | use http::header::HeaderValue; 8 | use penguin_mux::{Dupe, PROTOCOL_VERSION}; 9 | use tokio::net::TcpStream; 10 | use tokio_tungstenite::tungstenite::{client::IntoClientRequest, handshake::client::Request}; 11 | use tokio_tungstenite::{WebSocketStream, client_async}; 12 | use tracing::{debug, warn}; 13 | 14 | /// Perform a `WebSocket` handshake. 15 | #[tracing::instrument(skip_all, fields(server = %args.server.0), level = "debug")] 16 | async fn handshake_inner( 17 | args: &ClientArgs, 18 | ) -> Result>, super::Error> { 19 | // We already sanitized https URLs to wss 20 | let is_tls = args 21 | .server 22 | .scheme() 23 | .expect("URL scheme should be present (this is a bug)") 24 | .as_str() 25 | == "wss"; 26 | 27 | // Get the host and port from the URL 28 | // Both are guaranteed to exist by the `ClientArgs` parser 29 | let mut host = args 30 | .server 31 | .0 32 | .host() 33 | .expect("URL host should be present (this is a bug)"); 34 | // `Tcp*` functions expect IPv6 addresses to not be wrapped in square brackets 35 | if host.starts_with('[') && host.ends_with(']') { 36 | host = &host[1..host.len() - 1]; 37 | } 38 | let port = args 39 | .server 40 | .0 41 | .port_u16() 42 | .expect("URL port should be present (this is a bug)"); 43 | // Server name for SNI 44 | // To be overridden later if a custom hostname is provided 45 | let mut domain = host; 46 | 47 | // Use a request to allow additional headers 48 | let mut req: Request = args.server.0.dupe().into_client_request()?; 49 | let req_headers = req.headers_mut(); 50 | // Add protocol version 51 | req_headers.insert( 52 | "sec-websocket-protocol", 53 | HeaderValue::from_static(PROTOCOL_VERSION), 54 | ); 55 | // Add PSK 56 | if let Some(ref ws_psk) = args.ws_psk { 57 | req_headers.insert("x-penguin-psk", ws_psk.dupe()); 58 | } 59 | // Add potentially custom 
hostname 60 | if let Some(ref hostname) = args.hostname { 61 | req_headers.insert("host", hostname.dupe()); 62 | domain = hostname.to_str()?; 63 | } 64 | // Now add custom headers 65 | for header in &args.header { 66 | req_headers.insert(&header.name, header.value.dupe()); 67 | } 68 | let stream = if is_tls { 69 | MaybeTlsStream::Tls( 70 | tls_connect( 71 | host, 72 | port, 73 | domain, 74 | args.tls_cert.as_deref(), 75 | args.tls_key.as_deref(), 76 | args.tls_ca.as_deref(), 77 | args.tls_skip_verify, 78 | ) 79 | .await?, 80 | ) 81 | } else { 82 | // No TLS 83 | warn!("Using insecure WebSocket connection"); 84 | MaybeTlsStream::Plain( 85 | TcpStream::connect((host, port)) 86 | .await 87 | .map_err(super::Error::TcpConnect)?, 88 | ) 89 | }; 90 | let (ws_stream, _response) = client_async(req, stream).await?; 91 | debug!("WebSocket handshake succeeded"); 92 | Ok(ws_stream) 93 | } 94 | 95 | pub async fn handshake( 96 | args: &ClientArgs, 97 | ) -> Result>, super::Error> { 98 | tokio::select! { 99 | result = handshake_inner(args) => result, 100 | () = args.handshake_timeout.sleep() => Err(super::Error::HandshakeTimeout), 101 | Ok(()) = tokio::signal::ctrl_c() => Err(super::Error::HandshakeCancelled), 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | //! Default configuration parameters for the server and client. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use tokio::time; 6 | 7 | /// Both: how long to wait for responses to UDP outgoing datagrams 8 | pub const UDP_PRUNE_TIMEOUT: time::Duration = time::Duration::from_secs(10); 9 | /// Client side: Number of stream requests to buffer in the channels for the main 10 | /// loop to read from. 
11 | pub const STREAM_REQUEST_COMMAND_SIZE: usize = 1 << 6; 12 | /// Both: Number of datagrams to buffer in the channels for the main loop 13 | /// to read from. 14 | pub const INCOMING_DATAGRAM_BUFFER_SIZE: usize = 1 << 6; 15 | /// Both: Maximum size of a UDP packet. 16 | pub const MAX_UDP_PACKET_SIZE: usize = 1 << 16; 17 | /// Server side: Bind request buffer size 18 | pub const BIND_BUFFER_SIZE: usize = 1 << 4; 19 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | //! A fast TCP/UDP tunnel, transported over HTTP WebSocket. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | #![warn(rust_2018_idioms, missing_docs, missing_debug_implementations)] 5 | #![warn(clippy::pedantic, clippy::cargo, clippy::unwrap_used)] 6 | #![forbid(unsafe_code)] 7 | #![cfg_attr(not(all(feature = "client", feature = "server")), allow(dead_code))] 8 | 9 | mod arg; 10 | #[cfg(feature = "client")] 11 | mod client; 12 | mod config; 13 | mod parse_remote; 14 | #[cfg(feature = "server")] 15 | mod server; 16 | #[cfg(test)] 17 | mod tests; 18 | mod tls; 19 | 20 | use thiserror::Error; 21 | use tracing::{error, trace}; 22 | use tracing_subscriber::{filter, fmt, prelude::*, reload}; 23 | 24 | /// Errors 25 | #[derive(Error)] 26 | enum Error { 27 | #[cfg(feature = "client")] 28 | #[error(transparent)] 29 | Client(#[from] client::Error), 30 | #[cfg(feature = "server")] 31 | #[error(transparent)] 32 | Server(#[from] server::Error), 33 | } 34 | 35 | impl std::fmt::Debug for Error { 36 | // Simply delegate to `Display` so when `main` exits, there 37 | // is a nice error message. 
38 | #[inline] 39 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 40 | std::fmt::Display::fmt(self, f) 41 | } 42 | } 43 | 44 | const QUIET_QUIET_LOG_LEVEL: filter::LevelFilter = filter::LevelFilter::ERROR; 45 | const QUIET_LOG_LEVEL: filter::LevelFilter = filter::LevelFilter::WARN; 46 | const DEFAULT_LOG_LEVEL: filter::LevelFilter = filter::LevelFilter::INFO; 47 | const VERBOSE_LOG_LEVEL: filter::LevelFilter = filter::LevelFilter::DEBUG; 48 | const VERBOSE_VERBOSE_LOG_LEVEL: filter::LevelFilter = filter::LevelFilter::TRACE; 49 | 50 | #[cfg(feature = "deadlock-detection")] 51 | fn spawn_deadlock_detection() { 52 | use std::thread; 53 | 54 | // Create a background thread which checks for deadlocks every 10s 55 | thread::spawn(move || { 56 | loop { 57 | thread::sleep(std::time::Duration::from_secs(10)); 58 | let deadlocks = parking_lot::deadlock::check_deadlock(); 59 | if deadlocks.is_empty() { 60 | continue; 61 | } 62 | 63 | error!("{} deadlocks detected", deadlocks.len()); 64 | for (i, threads) in deadlocks.iter().enumerate() { 65 | error!("Deadlock #{}", i); 66 | for t in threads { 67 | error!("Thread Id {:#?}", t.thread_id()); 68 | error!("{:#?}", t.backtrace()); 69 | } 70 | } 71 | } 72 | }); 73 | } 74 | 75 | #[tokio::main] 76 | /// Entry point 77 | async fn main() -> Result<(), Box> { 78 | let (level_layer, reload_handle) = reload::Layer::new(DEFAULT_LOG_LEVEL); 79 | let fmt_layer = fmt::Layer::default() 80 | .compact() 81 | .with_timer(fmt::time::time()) 82 | .with_writer(std::io::stderr) 83 | .with_filter(level_layer); 84 | #[cfg(not(feature = "tokio-console"))] 85 | tracing_subscriber::registry().with(fmt_layer).init(); 86 | #[cfg(feature = "tokio-console")] 87 | tracing_subscriber::registry() 88 | .with(console_subscriber::spawn()) 89 | .with(fmt_layer) 90 | .init(); 91 | arg::PenguinCli::parse_global(); 92 | let cli_args = arg::PenguinCli::get_global(); 93 | trace!("cli_args = {cli_args:#?}"); 94 | match cli_args.verbose { 95 | 0 => {} 
96 | 1 => reload_handle 97 | .reload(VERBOSE_LOG_LEVEL) 98 | .expect("Resetting log level failed (this is a bug)"), 99 | _ => reload_handle 100 | .reload(VERBOSE_VERBOSE_LOG_LEVEL) 101 | .expect("Resetting log level failed (this is a bug)"), 102 | } 103 | match cli_args.quiet { 104 | 0 => {} 105 | 1 => reload_handle 106 | .reload(QUIET_LOG_LEVEL) 107 | .expect("Resetting log level failed (this is a bug)"), 108 | _ => reload_handle 109 | .reload(QUIET_QUIET_LOG_LEVEL) 110 | .expect("Resetting log level failed (this is a bug)"), 111 | } 112 | #[cfg(feature = "deadlock-detection")] 113 | spawn_deadlock_detection(); 114 | match &cli_args.subcommand { 115 | #[cfg(feature = "client")] 116 | arg::Commands::Client(args) => client::client_main(args) 117 | .await 118 | .map_err(|e| Box::new(e.into()))?, 119 | #[cfg(feature = "server")] 120 | arg::Commands::Server(args) => server::server_main(args) 121 | .await 122 | .map_err(|e| Box::new(e.into()))?, 123 | } 124 | Ok(()) 125 | } 126 | 127 | #[cfg(all(feature = "rustls-native-roots", feature = "rustls-webpki-roots"))] 128 | compile_error!("Only one of rustls-native-roots and rustls-webpki-roots can be enabled at a time"); 129 | #[cfg(all(feature = "__rustls", feature = "nativetls"))] 130 | compile_error!( 131 | "Only one of rustls-native-roots, rustls-webpki-roots, and nativetls can be enabled at a time" 132 | ); 133 | #[cfg(all(feature = "tokio-console", feature = "remove-logging"))] 134 | compile_error!("tokio-console without trace-level logging is likely not desired"); 135 | -------------------------------------------------------------------------------- /src/mux/config.rs: -------------------------------------------------------------------------------- 1 | //! Multiplexor configuration 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | /// Configuration parameters for the multiplexor. 6 | /// See each method for details on the parameters. 
7 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 8 | pub struct Options { 9 | pub(crate) keepalive_interval: crate::timing::OptionalDuration, 10 | pub(crate) datagram_buffer_size: usize, 11 | pub(crate) stream_buffer_size: usize, 12 | pub(crate) bind_buffer_size: usize, 13 | pub(crate) max_flow_id_retries: usize, 14 | pub(crate) rwnd: u32, 15 | pub(crate) default_rwnd_threshold: u32, 16 | } 17 | 18 | impl Default for Options { 19 | fn default() -> Self { 20 | Self::new() 21 | } 22 | } 23 | 24 | impl Options { 25 | /// Create a new [`Options`] instance with default values. 26 | #[must_use] 27 | pub const fn new() -> Self { 28 | const DATAGRAM_BUFFER_SIZE: usize = 1 << 9; 29 | const STREAM_BUFFER_SIZE: usize = 1 << 4; 30 | const MAX_FLOW_ID_RETRIES: usize = 3; 31 | 32 | #[cfg(not(test))] 33 | const RWND: u32 = 1 << 9; 34 | #[cfg(test)] 35 | const RWND: u32 = 4; 36 | #[cfg(not(test))] 37 | const DEFAULT_RWND_THRESHOLD: u32 = 1 << 8; 38 | #[cfg(test)] 39 | const DEFAULT_RWND_THRESHOLD: u32 = RWND; 40 | Self { 41 | keepalive_interval: crate::timing::OptionalDuration::NONE, 42 | datagram_buffer_size: DATAGRAM_BUFFER_SIZE, 43 | stream_buffer_size: STREAM_BUFFER_SIZE, 44 | bind_buffer_size: 0, 45 | max_flow_id_retries: MAX_FLOW_ID_RETRIES, 46 | rwnd: RWND, 47 | default_rwnd_threshold: DEFAULT_RWND_THRESHOLD, 48 | } 49 | } 50 | 51 | /// Sets the interval at which to send [`Ping`](crate::ws::Message::Ping) frames. 52 | #[must_use] 53 | pub const fn keepalive_interval(mut self, interval: crate::timing::OptionalDuration) -> Self { 54 | self.keepalive_interval = interval; 55 | self 56 | } 57 | 58 | /// Number of datagram frames to buffer in the channels on the receiving end. 59 | /// If the buffer is not read fast enough, excess datagrams will be dropped. 60 | /// 61 | /// # Panics 62 | /// Panics if the buffer size is not positive. 
63 | #[must_use] 64 | pub const fn datagram_buffer_size(mut self, size: usize) -> Self { 65 | assert!(size > 0, "datagram_buffer_size must be greater than 0"); 66 | self.datagram_buffer_size = size; 67 | self 68 | } 69 | 70 | /// Number of `MuxStream`s to buffer in the channels on the receiving end. 71 | /// Since there is a handshake to obtain `MuxStream`s, there should be no 72 | /// need to have a crazy high buffer size. 73 | /// 74 | /// # Panics 75 | /// Panics if the buffer size is not positive. 76 | #[must_use] 77 | pub const fn stream_buffer_size(mut self, size: usize) -> Self { 78 | assert!(size > 0, "stream_buffer_size must be greater than 0"); 79 | self.stream_buffer_size = size; 80 | self 81 | } 82 | 83 | /// Number of [`Bind`](crate::frame::OpCode::Bind) requests to buffer 84 | /// in the channels on the receiving end. 85 | /// Setting this to zero disallows the multiplexor from accepting any 86 | /// `Bind` requests from the other end and 87 | /// is the default. Make sure the security implications are understood 88 | /// before enabling this. 89 | #[must_use] 90 | pub const fn bind_buffer_size(mut self, size: usize) -> Self { 91 | self.bind_buffer_size = size; 92 | self 93 | } 94 | 95 | /// Number of retries for establishing a connection if the other end rejects our `flow_id` selection. 96 | /// 97 | /// # Panics 98 | /// Panics if the number of retries is not positive. 99 | #[must_use] 100 | pub const fn max_flow_id_retries(mut self, retries: usize) -> Self { 101 | assert!(retries > 0, "max_flow_id_retries must be greater than 0"); 102 | self.max_flow_id_retries = retries; 103 | self 104 | } 105 | 106 | /// Number of `StreamFrame`s to buffer in `MuxStream`'s channels before blocking. 107 | /// 108 | /// # Panics 109 | /// Panics if the buffer size is not positive or does not fit in a `usize`. 
110 | #[must_use] 111 | #[allow(clippy::cast_possible_truncation)] 112 | pub const fn rwnd(mut self, rwnd: u32) -> Self { 113 | // Make sure this value fits in a usize 114 | assert!((rwnd as usize) as u32 == rwnd, "rwnd must fit in a usize"); 115 | assert!(rwnd > 0, "rwnd must be greater than 0"); 116 | self.rwnd = rwnd; 117 | self 118 | } 119 | 120 | /// Number of [`Push`](crate::frame::OpCode::Push) frames between [`Acknowledge`](crate::frame::OpCode::Acknowledge)s: 121 | /// If too low, `Acknowledge`s will consume too much bandwidth; 122 | /// If too high, writers may block. 123 | /// 124 | /// Note that if the peer indicates a lower `rwnd` value in the handshake, 125 | /// this value will be ignored for that connection. 126 | /// 127 | /// # Panics 128 | /// Panics if the value is not positive. 129 | #[must_use] 130 | pub const fn default_rwnd_threshold(mut self, threshold: u32) -> Self { 131 | assert!( 132 | threshold > 0, 133 | "default_rwnd_threshold must be greater than 0" 134 | ); 135 | self.default_rwnd_threshold = threshold; 136 | self 137 | } 138 | } 139 | 140 | #[cfg(test)] 141 | mod tests { 142 | use super::*; 143 | use std::time::Duration; 144 | 145 | #[test] 146 | fn test_options() { 147 | let options = Options::new() 148 | .keepalive_interval(Duration::from_secs(100).into()) 149 | .datagram_buffer_size(33) 150 | .stream_buffer_size(44) 151 | .bind_buffer_size(55) 152 | .max_flow_id_retries(66) 153 | .rwnd(77) 154 | .default_rwnd_threshold(88); 155 | assert_eq!(options.keepalive_interval, Duration::from_secs(100).into()); 156 | assert_eq!(options.datagram_buffer_size, 33); 157 | assert_eq!(options.stream_buffer_size, 44); 158 | assert_eq!(options.bind_buffer_size, 55); 159 | assert_eq!(options.max_flow_id_retries, 66); 160 | assert_eq!(options.rwnd, 77); 161 | assert_eq!(options.default_rwnd_threshold, 88); 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/mux/dupe.rs: 
-------------------------------------------------------------------------------- 1 | //! Marker trait for types that can be cheaply cloned. 2 | //! 3 | //! Inspired by facebook/gazebo's `Dupe`. 4 | // 5 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 6 | 7 | /// Marker trait for types that can be cheaply cloned. 8 | pub trait Dupe { 9 | /// A cheap clone of the object. 10 | #[must_use] 11 | fn dupe(&self) -> Self; 12 | } 13 | 14 | macro_rules! impl_dupe_as_clone { 15 | ($t:ty => $($g:ident),* $(,)?) => { 16 | impl<$($g),*> Dupe for $t { 17 | #[inline] 18 | fn dupe(&self) -> Self { 19 | self.clone() 20 | } 21 | } 22 | }; 23 | ($($t:ty => ($($g:ident),* $(,)?)),* $(,)?) => { 24 | $(impl_dupe_as_clone!($t => $($g),*);)* 25 | }; 26 | } 27 | 28 | impl_dupe_as_clone! { 29 | // `Bytes` is a reference-counted type. 30 | bytes::Bytes => (), 31 | // `HeaderValue` is a wrapper around `Bytes`. 32 | http::header::HeaderValue => (), 33 | // `Authority` is a wrapper around `Bytes`. 34 | http::uri::Authority => (), 35 | // `Scheme` by default is a wrapper around `Bytes`. 36 | http::uri::Scheme => (), 37 | // `PathAndQuery` is a wrapper around `Bytes`. 38 | http::uri::PathAndQuery => (), 39 | // `Uri` is the combination of the above. 40 | http::Uri => (), 41 | // `Arc` is a reference-counted type. 42 | std::sync::Arc => (T), 43 | // `Sender` is designed to be cheaply cloned. 44 | tokio::sync::mpsc::Sender => (T), 45 | // `UnboundedSender` is designed to be cheaply cloned. 46 | tokio::sync::mpsc::UnboundedSender => (T), 47 | // `broadcast::Sender` is designed to be cheaply cloned. 48 | tokio::sync::broadcast::Sender => (T), 49 | } 50 | 51 | #[cfg(loom)] 52 | impl_dupe_as_clone! 
{ 53 | loom::sync::Arc => (T), 54 | } 55 | -------------------------------------------------------------------------------- /src/mux/loom.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(loom, test))] 2 | pub use self::lock::{AtomicWaker, Mutex, RwLock}; 3 | #[cfg(not(all(loom, test)))] 4 | pub use futures_util::task::AtomicWaker; 5 | #[cfg(all(loom, test))] 6 | pub use loom::sync::{ 7 | Arc, 8 | atomic::{AtomicBool, AtomicU32, Ordering}, 9 | }; 10 | #[cfg(not(all(loom, test)))] 11 | pub use parking_lot::{Mutex, RwLock}; 12 | #[cfg(not(all(loom, test)))] 13 | pub use std::sync::{ 14 | Arc, 15 | atomic::{AtomicBool, AtomicU32, Ordering}, 16 | }; 17 | 18 | #[cfg(all(loom, test))] 19 | mod lock { 20 | #[derive(Debug)] 21 | pub struct Mutex(loom::sync::Mutex); 22 | impl Mutex { 23 | pub fn new(t: T) -> Self { 24 | Self(loom::sync::Mutex::new(t)) 25 | } 26 | pub fn lock(&self) -> loom::sync::MutexGuard<'_, T> { 27 | self.0.lock().expect("Poisoned `Mutex`") 28 | } 29 | } 30 | 31 | #[derive(Debug)] 32 | pub struct RwLock(loom::sync::RwLock); 33 | impl RwLock { 34 | pub fn new(t: T) -> Self { 35 | Self(loom::sync::RwLock::new(t)) 36 | } 37 | pub fn read(&self) -> loom::sync::RwLockReadGuard<'_, T> { 38 | self.0.read().expect("Poisoned `RwLock`") 39 | } 40 | pub fn write(&self) -> loom::sync::RwLockWriteGuard<'_, T> { 41 | self.0.write().expect("Poisoned `RwLock`") 42 | } 43 | } 44 | 45 | #[derive(Debug)] 46 | pub struct AtomicWaker(loom::future::AtomicWaker); 47 | impl AtomicWaker { 48 | pub fn new() -> Self { 49 | Self(loom::future::AtomicWaker::new()) 50 | } 51 | pub fn register(&self, waker: &std::task::Waker) { 52 | self.0.register_by_ref(waker) 53 | } 54 | pub fn wake(&self) { 55 | self.0.wake() 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/mux/proto_version.rs: -------------------------------------------------------------------------------- 1 | /// 
Current mux and Penguin protocol version 2 | pub const PROTOCOL_VERSION: &str = "penguin-v7"; 3 | 4 | /// Current mux and Penguin protocol version, only the version number 5 | pub const PROTOCOL_VERSION_NUMBER: u8 = 7; 6 | -------------------------------------------------------------------------------- /src/mux/timing.rs: -------------------------------------------------------------------------------- 1 | //! Various timing utilities. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use std::{ 6 | fmt::{self, Debug}, 7 | time::Duration, 8 | }; 9 | 10 | /// Exponential backoff for retrying failed requests. 11 | #[derive(Copy, Clone, Debug)] 12 | pub struct Backoff { 13 | /// Initial backoff duration. 14 | initial: Duration, 15 | /// Maximum backoff duration. 16 | /// If the backoff duration exceeds this value, it will be clamped to this value. 17 | max: Duration, 18 | /// Backoff multiplier. 19 | mult: u32, 20 | /// Maximum number of retries. 21 | /// If the retry count exceeds this value, the backoff generator will return `None`. 22 | /// If this value is `0`, the backoff generator will never return `None`. 23 | max_count: u32, 24 | /// Current backoff duration. 25 | current: Duration, 26 | /// Current retry count. 27 | count: u32, 28 | } 29 | 30 | impl Backoff { 31 | /// Create a new backoff generator. 32 | #[must_use] 33 | pub const fn new(initial: Duration, max: Duration, mult: u32, max_count: u32) -> Self { 34 | Self { 35 | initial, 36 | max, 37 | mult, 38 | max_count, 39 | current: initial, 40 | count: 0, 41 | } 42 | } 43 | 44 | /// Advance to the next backoff duration and return the previous duration. 45 | pub fn advance(&mut self) -> Option { 46 | if self.max_count != 0 && self.count >= self.max_count { 47 | return None; 48 | } 49 | self.count += 1; 50 | 51 | let old = self.current.min(self.max); 52 | self.current = old * self.mult; 53 | Some(old) 54 | } 55 | 56 | /// Reset the backoff generator. 
57 | pub const fn reset(&mut self) { 58 | self.current = self.initial; 59 | self.count = 0; 60 | } 61 | } 62 | 63 | /// An optional duration: an empty duration means that there should be no timeout, 64 | /// or that an interval should be infinite. 65 | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] 66 | pub struct OptionalDuration(Option); 67 | 68 | impl OptionalDuration { 69 | /// The special constant representing "no timeout" or "indefinite". 70 | pub const NONE: Self = Self(None); 71 | 72 | /// Convenience method to create an `OptionalDuration` of the given number of seconds 73 | #[must_use] 74 | pub const fn from_secs(duration: u64) -> Self { 75 | Self(Some(Duration::from_secs(duration))) 76 | } 77 | 78 | /// Use the optional duration to timeout a future 79 | /// 80 | /// # Errors 81 | /// Returns an `Err` variant if the future does not finish in the specified duration. 82 | pub async fn timeout( 83 | self, 84 | future: T, 85 | ) -> Result { 86 | match self.0 { 87 | Some(duration) => tokio::time::timeout(duration, future).await, 88 | None => Ok(future.await), 89 | } 90 | } 91 | 92 | /// Use the optional duration to sleep 93 | pub async fn sleep(self) { 94 | match self.0 { 95 | Some(duration) => tokio::time::sleep(duration).await, 96 | None => std::future::pending().await, 97 | } 98 | } 99 | } 100 | 101 | impl std::str::FromStr for OptionalDuration { 102 | type Err = std::num::ParseIntError; 103 | 104 | fn from_str(s: &str) -> Result { 105 | let value = s.parse::()?; 106 | if value == 0 { 107 | Ok(Self(None)) 108 | } else { 109 | Ok(Self::from_secs(value)) 110 | } 111 | } 112 | } 113 | 114 | impl fmt::Display for OptionalDuration { 115 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 116 | match self.0 { 117 | Some(duration) => duration.fmt(f), 118 | None => write!(f, "indefinite"), 119 | } 120 | } 121 | } 122 | 123 | impl From for OptionalDuration { 124 | fn from(duration: Duration) -> Self { 125 | if duration.is_zero() { 126 | Self(None) 127 
| } else { 128 | Self(Some(duration)) 129 | } 130 | } 131 | } 132 | 133 | impl From for Option { 134 | fn from(opt_dur: OptionalDuration) -> Self { 135 | opt_dur.0 136 | } 137 | } 138 | 139 | /// An optional interval 140 | #[derive(Debug, Default)] 141 | pub struct OptionalInterval(Option); 142 | 143 | impl OptionalInterval { 144 | /// Defines the behavior of the internal [`tokio::time::Interval`] when it misses a tick. 145 | pub fn set_missed_tick_behavior(&mut self, behavior: tokio::time::MissedTickBehavior) { 146 | if let Some(interval) = &mut self.0 { 147 | interval.set_missed_tick_behavior(behavior); 148 | } 149 | } 150 | 151 | /// Completes when the next instant in the interval has been reached. 152 | pub async fn tick(&mut self) -> tokio::time::Instant { 153 | if let Some(interval) = &mut self.0 { 154 | interval.tick().await 155 | } else { 156 | // We shall never resolve 157 | std::future::pending::().await 158 | } 159 | } 160 | } 161 | 162 | impl From for OptionalInterval { 163 | fn from(dur: OptionalDuration) -> Self { 164 | Self(dur.0.map(tokio::time::interval)) 165 | } 166 | } 167 | 168 | #[cfg(test)] 169 | mod tests { 170 | use super::*; 171 | use futures_util::FutureExt; 172 | 173 | #[test] 174 | fn test_backoff() { 175 | crate::tests::setup_logging(); 176 | let mut backoff = Backoff::new(Duration::from_millis(10), Duration::from_secs(1), 2, 5); 177 | assert_eq!(backoff.advance(), Some(Duration::from_millis(10))); 178 | assert_eq!(backoff.advance(), Some(Duration::from_millis(20))); 179 | assert_eq!(backoff.advance(), Some(Duration::from_millis(40))); 180 | assert_eq!(backoff.advance(), Some(Duration::from_millis(80))); 181 | assert_eq!(backoff.advance(), Some(Duration::from_millis(160))); 182 | assert_eq!(backoff.advance(), None); 183 | backoff.reset(); 184 | assert_eq!(backoff.advance(), Some(Duration::from_millis(10))); 185 | backoff.reset(); 186 | assert_eq!(backoff.advance(), Some(Duration::from_millis(10))); 187 | assert_eq!(backoff.advance(), 
Some(Duration::from_millis(20))); 188 | assert_eq!(backoff.advance(), Some(Duration::from_millis(40))); 189 | assert_eq!(backoff.advance(), Some(Duration::from_millis(80))); 190 | assert_eq!(backoff.advance(), Some(Duration::from_millis(160))); 191 | assert_eq!(backoff.advance(), None); 192 | assert_eq!(backoff.advance(), None); 193 | let mut backoff = Backoff::new(Duration::from_secs(10), Duration::from_secs(1), 2, 0); 194 | assert_eq!(backoff.advance(), Some(Duration::from_secs(1))); 195 | assert_eq!(backoff.advance(), Some(Duration::from_secs(1))); 196 | assert_eq!(backoff.advance(), Some(Duration::from_secs(1))); 197 | } 198 | 199 | #[test] 200 | fn test_optional_duration() { 201 | crate::tests::setup_logging(); 202 | let std_dur = Duration::from_secs(0); 203 | let opt_dur = OptionalDuration::from(std_dur); 204 | assert_eq!(opt_dur, OptionalDuration::NONE); 205 | let std_dur = Duration::from_secs(10); 206 | let opt_dur = OptionalDuration::from(std_dur); 207 | assert_eq!(opt_dur, OptionalDuration::from_secs(10)); 208 | let dur = OptionalDuration::from_secs(10); 209 | assert_eq!(dur.to_string(), "10s"); 210 | let dur_none = OptionalDuration::NONE; 211 | assert_eq!(dur_none.to_string(), "indefinite"); 212 | let parsed: OptionalDuration = "20".parse().unwrap(); 213 | assert_eq!(parsed.to_string(), "20s"); 214 | let parsed_none: OptionalDuration = "0".parse().unwrap(); 215 | assert_eq!(parsed_none, OptionalDuration::NONE); 216 | } 217 | 218 | #[tokio::test] 219 | #[cfg(not(loom))] 220 | async fn test_optional_interval() { 221 | crate::tests::setup_logging(); 222 | let dur = OptionalDuration::from_secs(2); 223 | let mut interval = OptionalInterval::from(dur); 224 | interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); 225 | assert!(interval.tick().now_or_never().is_none()); 226 | tokio::time::sleep(Duration::from_secs(3)).await; 227 | let instant = interval.tick().now_or_never().unwrap(); 228 | assert!(instant < tokio::time::Instant::now()); 229 
| } 230 | } 231 | -------------------------------------------------------------------------------- /src/mux/ws.rs: -------------------------------------------------------------------------------- 1 | //! Generic WebSocket 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use bytes::Bytes; 6 | use std::task::{Context, Poll}; 7 | 8 | /// Types of messages we need 9 | #[derive(Clone, PartialEq, Eq)] 10 | pub enum Message { 11 | /// Binary message or any payload 12 | Binary(Bytes), 13 | /// Ping message. Note that the payload is discarded. 14 | Ping, 15 | /// Pong message. Note that the payload is discarded. 16 | Pong, 17 | /// Close message. Note that the payload is discarded. 18 | Close, 19 | } 20 | 21 | impl std::fmt::Debug for Message { 22 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 23 | match self { 24 | Self::Binary(data) => f.debug_struct("Binary").field("len", &data.len()).finish(), 25 | Self::Ping => f.debug_struct("Ping").finish(), 26 | Self::Pong => f.debug_struct("Pong").finish(), 27 | Self::Close => f.debug_struct("Close").finish(), 28 | } 29 | } 30 | } 31 | 32 | /// A generic WebSocket stream 33 | /// 34 | /// Specialized for our [`Message`] type similar to [`futures_util::Stream`] and [`futures_util::Sink`]. 35 | /// See [`futures_util::Stream`] and [`futures_util::Sink`] for more details on the required methods. 36 | pub trait WebSocket: Send + 'static { 37 | /// Attempt to prepare the `Sink` to receive a value. 38 | /// 39 | /// # Errors 40 | /// Indicates the underlying sink is permanently be unable to receive items. 41 | fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll>; 42 | /// Begin the process of sending a value to the sink. 43 | /// 44 | /// # Errors 45 | /// Indicates the underlying sink is permanently be unable to receive items. 46 | fn start_send_unpin(&mut self, item: Message) -> Result<(), crate::Error>; 47 | /// Flush any remaining output from this sink. 
48 | /// 49 | /// # Errors 50 | /// Indicates the underlying sink is permanently be unable to receive items. 51 | fn poll_flush_unpin(&mut self, cx: &mut Context<'_>) -> Poll>; 52 | /// Flush any remaining output and close this sink, if necessary. 53 | /// 54 | /// # Errors 55 | /// Indicates the underlying sink is unable to be closed properly but is nonetheless 56 | /// permanently be unable to receive items. 57 | fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll>; 58 | /// Attempt to pull out the next value of this stream. 59 | /// 60 | /// # Errors 61 | /// Indicates the underlying stream is otherwise unable to produce items. 62 | fn poll_next_unpin( 63 | &mut self, 64 | cx: &mut Context<'_>, 65 | ) -> Poll>>; 66 | } 67 | 68 | #[cfg(feature = "tungstenite")] 69 | mod tokio_tungstenite { 70 | use std::{ 71 | pin::Pin, 72 | task::{Context, Poll}, 73 | }; 74 | 75 | use bytes::Bytes; 76 | use futures_util::{Sink, Stream}; 77 | use tokio_tungstenite::tungstenite; 78 | use tracing::error; 79 | 80 | use super::{Message, WebSocket}; 81 | impl From for Message { 82 | #[inline] 83 | fn from(msg: tungstenite::Message) -> Self { 84 | match msg { 85 | tungstenite::Message::Binary(data) => Self::Binary(data), 86 | tungstenite::Message::Text(text) => { 87 | error!("Received text message: {text}"); 88 | Self::Binary(Bytes::from(text)) 89 | } 90 | tungstenite::Message::Ping(_) => Self::Ping, 91 | tungstenite::Message::Pong(_) => Self::Pong, 92 | tungstenite::Message::Close(_) => Self::Close, 93 | tungstenite::Message::Frame(_) => { 94 | unreachable!("`Frame` message should not be received") 95 | } 96 | } 97 | } 98 | } 99 | 100 | impl From for tungstenite::Message { 101 | #[inline] 102 | fn from(msg: Message) -> Self { 103 | match msg { 104 | Message::Binary(data) => Self::Binary(data), 105 | Message::Ping => Self::Ping(Bytes::new()), 106 | Message::Pong => Self::Pong(Bytes::new()), 107 | Message::Close => Self::Close(None), 108 | } 109 | } 110 | } 111 | 112 | impl 
From for crate::Error { 113 | #[inline] 114 | fn from(e: tungstenite::Error) -> Self { 115 | Self::WebSocket(Box::new(e)) 116 | } 117 | } 118 | 119 | impl WebSocket for tokio_tungstenite::WebSocketStream 120 | where 121 | RW: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static, 122 | { 123 | #[inline] 124 | fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll> { 125 | Pin::new(self).poll_ready(cx).map_err(Into::into) 126 | } 127 | 128 | #[inline] 129 | fn start_send_unpin(&mut self, item: Message) -> Result<(), crate::Error> { 130 | let item: tungstenite::Message = item.into(); 131 | Pin::new(self).start_send(item).map_err(Into::into) 132 | } 133 | 134 | #[inline] 135 | fn poll_flush_unpin(&mut self, cx: &mut Context<'_>) -> Poll> { 136 | Pin::new(self).poll_flush(cx).map_err(Into::into) 137 | } 138 | 139 | #[inline] 140 | fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll> { 141 | let this = Pin::new(self); 142 | futures_util::Sink::poll_close(this, cx).map_err(Into::into) 143 | } 144 | 145 | #[inline] 146 | fn poll_next_unpin( 147 | &mut self, 148 | cx: &mut Context<'_>, 149 | ) -> Poll>> { 150 | Pin::new(self) 151 | .poll_next(cx) 152 | .map(|opt| opt.map(|res| res.map(Into::into).map_err(Into::into))) 153 | } 154 | } 155 | 156 | #[cfg(test)] 157 | mod tests { 158 | use super::*; 159 | use tokio_tungstenite::tungstenite::protocol::frame::coding::CloseCode; 160 | 161 | #[test] 162 | fn test_binary_message() { 163 | let msg = tungstenite::Message::Binary(Bytes::from_static(b"Hello")); 164 | let converted: Message = msg.clone().into(); 165 | assert_eq!(converted, Message::Binary(Bytes::from_static(b"Hello"))); 166 | assert_eq!(tungstenite::Message::from(converted), msg); 167 | } 168 | 169 | #[test] 170 | fn test_text_message() { 171 | let msg = tungstenite::Message::Text("Hello".into()); 172 | let converted: Message = msg.into(); 173 | assert_eq!(converted, Message::Binary(Bytes::from_static(b"Hello"))); 174 | assert_eq!( 175 | 
tungstenite::Message::from(converted), 176 | tungstenite::Message::Binary(Bytes::from_static(b"Hello")) 177 | ); 178 | } 179 | 180 | #[test] 181 | fn test_ping_message() { 182 | let msg = tungstenite::Message::Ping(Bytes::from_static(b"Ping")); 183 | let converted: Message = msg.into(); 184 | assert_eq!(converted, Message::Ping); 185 | assert_eq!( 186 | tungstenite::Message::from(converted), 187 | tungstenite::Message::Ping(Bytes::new()) 188 | ); 189 | 190 | let msg = tungstenite::Message::Pong(Bytes::from_static(b"Pong")); 191 | let converted: Message = msg.into(); 192 | assert_eq!(converted, Message::Pong); 193 | assert_eq!( 194 | tungstenite::Message::from(converted), 195 | tungstenite::Message::Pong(Bytes::new()) 196 | ); 197 | } 198 | 199 | #[test] 200 | fn test_close_message() { 201 | let close_msg = 202 | tungstenite::Message::Close(Some(tungstenite::protocol::frame::CloseFrame { 203 | code: CloseCode::Reserved(1000), 204 | reason: "Normal".into(), 205 | })); 206 | let converted: Message = close_msg.into(); 207 | assert_eq!(converted, Message::Close); 208 | } 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /src/server/acme/challenge_helper.rs: -------------------------------------------------------------------------------- 1 | use super::Error; 2 | use instant_acme::{Authorization, AuthorizationStatus, ChallengeType, Order}; 3 | use std::{ 4 | ffi::{OsStr, OsString}, 5 | path::PathBuf, 6 | }; 7 | use tracing::{debug, error}; 8 | 9 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 10 | pub enum Action { 11 | /// Create a new challenge file 12 | Create, 13 | /// Remove the challenge file after use 14 | Remove, 15 | } 16 | 17 | impl Action { 18 | const fn as_str(self) -> &'static str { 19 | match self { 20 | Self::Create => "create", 21 | Self::Remove => "remove", 22 | } 23 | } 24 | } 25 | 26 | /// An external command to create or remove a challenge file for ACME validation 27 | #[derive(Clone)] 28 | pub 
struct ChallengeHelper(OsString); 29 | 30 | impl std::fmt::Debug for ChallengeHelper { 31 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 32 | self.0.fmt(f) 33 | } 34 | } 35 | 36 | impl From for ChallengeHelper { 37 | fn from(path: OsString) -> Self { 38 | Self(path) 39 | } 40 | } 41 | 42 | impl From for ChallengeHelper { 43 | fn from(path: PathBuf) -> Self { 44 | Self(path.into_os_string()) 45 | } 46 | } 47 | 48 | impl AsRef for ChallengeHelper { 49 | fn as_ref(&self) -> &OsStr { 50 | &self.0 51 | } 52 | } 53 | 54 | impl ChallengeHelper { 55 | pub fn call( 56 | &self, 57 | action: Action, 58 | key_authorization: &str, 59 | ) -> Result { 60 | debug!("executing challenge helper: {self:?} {key_authorization}"); 61 | let cmd = tokio::process::Command::new(self) 62 | .arg(action.as_str()) 63 | .arg(key_authorization) 64 | .stdin(std::process::Stdio::piped()) 65 | .stdout(std::process::Stdio::piped()) 66 | .spawn()?; 67 | Ok(cmd) 68 | } 69 | 70 | /// Process a single challenge 71 | pub async fn one_challenge<'a>( 72 | &self, 73 | auth: &'a Authorization, 74 | order: &Order, 75 | ) -> Result, Error> { 76 | // Find the HTTP-01 challenge for each pending authorization 77 | let http_challenge = match auth.status { 78 | AuthorizationStatus::Valid => return Ok(None), // Already valid, no need to process 79 | AuthorizationStatus::Pending => auth 80 | .challenges 81 | .iter() 82 | .find(|c| c.r#type == ChallengeType::Http01) 83 | .ok_or(Error::NoHttp01ChallengeSupport), 84 | _ => Err(Error::AuthInvalid(auth.status)), 85 | }?; 86 | let key_auth = order.key_authorization(http_challenge).as_str().to_string(); 87 | // Execute the challenge helper to create the file 88 | self.call(Action::Create, &key_auth)? 
89 | .wait() 90 | .await 91 | .map_err(Error::ChallengeHelperExecution)?; 92 | debug!("processing for {key_auth} succeeded"); 93 | Ok(Some((key_auth, &http_challenge.url))) 94 | } 95 | 96 | /// Process challenge files for the HTTP-01 challenge 97 | pub async fn process_challenges( 98 | &self, 99 | authorizations: &[Authorization], 100 | order: &mut instant_acme::Order, 101 | ) -> Result, Error> { 102 | let mut executed_challenges = Vec::with_capacity(authorizations.len()); 103 | for auth in authorizations { 104 | match self.one_challenge(auth, order).await { 105 | Ok(Some((key_auth, challenge_url))) => { 106 | executed_challenges.push(key_auth); 107 | // Tell the server we are ready for the challenges 108 | order.set_challenge_ready(challenge_url).await?; 109 | } 110 | Ok(None) => {} 111 | Err(e) => { 112 | for key_auth in &executed_challenges { 113 | // Clean up any previously created challenge files on error 114 | let _ = self.call(Action::Remove, key_auth); 115 | } 116 | error!("Failed to process challenge: {e}"); 117 | return Err(e); 118 | } 119 | } 120 | } 121 | Ok(executed_challenges) 122 | } 123 | } 124 | 125 | #[cfg(test)] 126 | mod tests { 127 | use super::super::tests::*; 128 | #[cfg(feature = "tests-acme-has-pebble")] 129 | use super::super::tests_need_pebble::*; 130 | use super::*; 131 | #[cfg(feature = "tests-acme-has-pebble")] 132 | use instant_acme::{Account, Identifier, NewAccount, NewOrder}; 133 | use tempfile::tempdir; 134 | use tokio::io::AsyncReadExt; 135 | 136 | #[tokio::test] 137 | async fn test_call_challenge_helper_simple() { 138 | crate::tests::setup_logging(); 139 | let expected_out1 = "create f86oS4UZR6kX5U31VVc05dhOa-GMEvU3RL1Q64fVaKY.tvg9X8xCoUuU_vK9qNR1d2RyGSGVfq3VYDJ-O81nnyY\n"; 140 | let helper = ChallengeHelper(OsString::from("echo")); 141 | let result = helper.call(Action::Create, TEST_KEY_AUTH); 142 | let child = result.unwrap(); 143 | let out = child.wait_with_output().await.unwrap(); 144 | assert!(out.status.success()); 145 | 
let stdout = String::from_utf8(out.stdout).unwrap(); 146 | assert_eq!(stdout, expected_out1); 147 | let expected_out2 = "remove f86oS4UZR6kX5U31VVc05dhOa-GMEvU3RL1Q64fVaKY.tvg9X8xCoUuU_vK9qNR1d2RyGSGVfq3VYDJ-O81nnyY\n"; 148 | let result = helper.call(Action::Remove, TEST_KEY_AUTH); 149 | let child = result.unwrap(); 150 | let out = child.wait_with_output().await.unwrap(); 151 | assert!(out.status.success()); 152 | let stdout = String::from_utf8(out.stdout).unwrap(); 153 | assert_eq!(stdout, expected_out2); 154 | } 155 | 156 | #[tokio::test] 157 | #[cfg(not(target_os = "windows"))] 158 | async fn test_call_challenge_helper_example() { 159 | crate::tests::setup_logging(); 160 | let script_path = format!( 161 | "{}/.github/workflows/http01_helper_for_test.sh", 162 | env!("CARGO_MANIFEST_DIR") 163 | ); 164 | let tmpdir = tempdir().unwrap(); 165 | let actual_path = tmpdir.path().join("http01_helper.sh"); 166 | tokio::fs::copy(&script_path, &actual_path).await.unwrap(); 167 | // Wait until the file is ready (for Linux CI runs) 168 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 169 | let helper = ChallengeHelper::from(actual_path); 170 | helper 171 | .call(Action::Create, TEST_KEY_AUTH) 172 | .unwrap() 173 | .wait() 174 | .await 175 | .unwrap(); 176 | let expected_out = tmpdir 177 | .path() 178 | .join(".well-known/acme-challenge") 179 | .join(TEST_TOKEN); 180 | assert!(expected_out.exists(), "Challenge file was not created"); 181 | let mut content = String::new(); 182 | tokio::fs::File::open(&expected_out) 183 | .await 184 | .unwrap() 185 | .read_to_string(&mut content) 186 | .await 187 | .unwrap(); 188 | assert_eq!(content.trim(), TEST_KEY_AUTH); 189 | 190 | helper 191 | .call(Action::Remove, TEST_KEY_AUTH) 192 | .unwrap() 193 | .wait() 194 | .await 195 | .unwrap(); 196 | assert!(!expected_out.exists(), "Challenge file was not removed"); 197 | } 198 | 199 | #[cfg(feature = "tests-acme-has-pebble")] 200 | #[cfg(not(target_os = "windows"))] 201 | 
#[tokio::test] 202 | async fn test_process_one_challenge() { 203 | crate::tests::setup_logging(); 204 | let script_path = format!( 205 | "{}/.github/workflows/http01_helper_for_test.sh", 206 | env!("CARGO_MANIFEST_DIR") 207 | ); 208 | let tmpdir = tempdir().unwrap(); 209 | let actual_path = tmpdir.path().join("http01_helper.sh"); 210 | tokio::fs::copy(&script_path, &actual_path).await.unwrap(); 211 | // Wait until the file is ready (for Linux CI runs) 212 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 213 | let helper = ChallengeHelper::from(actual_path); 214 | let (account, _cred) = Account::create_with_http( 215 | &NewAccount { 216 | contact: &[], 217 | terms_of_service_agreed: true, 218 | only_return_existing: false, 219 | }, 220 | TEST_PEBBLE_URL, 221 | None, 222 | Box::new(IgnoreTlsHttpClient::new().await), 223 | ) 224 | .await 225 | .unwrap(); 226 | let identifier = Identifier::Dns("a.example.com".to_string()); 227 | let new_order = NewOrder { 228 | identifiers: &[identifier], 229 | }; 230 | let mut order = account.new_order(&new_order).await.unwrap(); 231 | let authorizations = order.authorizations().await.unwrap(); 232 | let first_auth = authorizations.first().unwrap(); 233 | assert!( 234 | first_auth.status == AuthorizationStatus::Pending, 235 | "temporary error in test setup: auth status is {:?} instead of Pending", 236 | first_auth.status 237 | ); 238 | let first_auth_challenge = first_auth 239 | .challenges 240 | .iter() 241 | .find(|c| c.r#type == ChallengeType::Http01) 242 | .unwrap(); 243 | let expected_key_auth = order 244 | .key_authorization(first_auth_challenge) 245 | .as_str() 246 | .to_string(); 247 | let (keyauth, _url) = helper 248 | .one_challenge(first_auth, &order) 249 | .await 250 | .unwrap() 251 | .unwrap(); 252 | assert_eq!(keyauth, expected_key_auth); 253 | let token = expected_key_auth.split('.').next().unwrap(); 254 | let verify_location = tmpdir.path().join(".well-known/acme-challenge").join(token); 255 | assert!( 
256 | verify_location.exists(), 257 | "Challenge file was not created at expected location" 258 | ); 259 | let mut content = String::new(); 260 | tokio::fs::File::open(&verify_location) 261 | .await 262 | .unwrap() 263 | .read_to_string(&mut content) 264 | .await 265 | .unwrap(); 266 | assert_eq!(content.trim(), expected_key_auth); 267 | } 268 | 269 | #[cfg(feature = "tests-acme-has-pebble")] 270 | #[cfg(not(target_os = "windows"))] 271 | #[tokio::test] 272 | async fn test_process_challenges() { 273 | crate::tests::setup_logging(); 274 | let script_path = format!( 275 | "{}/.github/workflows/http01_helper_for_test.sh", 276 | env!("CARGO_MANIFEST_DIR") 277 | ); 278 | let tmpdir = tempdir().unwrap(); 279 | let actual_path = tmpdir.path().join("http01_helper.sh"); 280 | tokio::fs::copy(&script_path, &actual_path).await.unwrap(); 281 | // Wait until the file is ready (for Linux CI runs) 282 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 283 | let helper = ChallengeHelper::from(actual_path); 284 | let (account, _cred) = Account::create_with_http( 285 | &NewAccount { 286 | contact: &[], 287 | terms_of_service_agreed: true, 288 | only_return_existing: false, 289 | }, 290 | TEST_PEBBLE_URL, 291 | None, 292 | Box::new(IgnoreTlsHttpClient::new().await), 293 | ) 294 | .await 295 | .unwrap(); 296 | let identifiers = vec![ 297 | Identifier::Dns("a.example.com".to_string()), 298 | Identifier::Dns("b.example.com".to_string()), 299 | Identifier::Dns("c.example.com".to_string()), 300 | ]; 301 | let new_order = NewOrder { 302 | identifiers: &identifiers, 303 | }; 304 | let mut order = account.new_order(&new_order).await.unwrap(); 305 | let authorizations = order.authorizations().await.unwrap(); 306 | assert_eq!(authorizations.len(), 3); 307 | let expected_key_auths = authorizations 308 | .iter() 309 | .filter_map(|auth| { 310 | if auth.status == AuthorizationStatus::Pending { 311 | let http_challenge = auth 312 | .challenges 313 | .iter() 314 | .find(|c| c.r#type == 
ChallengeType::Http01); 315 | http_challenge.map(|c| order.key_authorization(c).as_str().to_string()) 316 | } else { 317 | None 318 | } 319 | }) 320 | .collect::>(); 321 | // Process the challenges 322 | let keyauths = helper 323 | .process_challenges(&authorizations, &mut order) 324 | .await 325 | .unwrap(); 326 | for expected_key_auth in &expected_key_auths { 327 | assert!(keyauths.contains(expected_key_auth)); 328 | } 329 | for keyauth in &keyauths { 330 | let token = keyauth.split('.').next().unwrap(); 331 | let verify_location = tmpdir.path().join(".well-known/acme-challenge").join(token); 332 | assert!(verify_location.exists()); 333 | let mut content = String::new(); 334 | tokio::fs::File::open(&verify_location) 335 | .await 336 | .unwrap() 337 | .read_to_string(&mut content) 338 | .await 339 | .unwrap(); 340 | assert_eq!(content.trim(), *keyauth); 341 | } 342 | } 343 | } 344 | -------------------------------------------------------------------------------- /src/server/forwarder.rs: -------------------------------------------------------------------------------- 1 | //! Server-side forwarding implementation. 2 | //! Pipes TCP streams or forwards UDP Datagrams to and from another host. 3 | // 4 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 5 | 6 | use crate::config; 7 | use penguin_mux::{Datagram, Dupe, MuxStream}; 8 | use std::net::SocketAddr; 9 | use thiserror::Error; 10 | use tokio::net::TcpStream; 11 | use tokio::{ 12 | net::{UdpSocket, lookup_host}, 13 | sync::mpsc, 14 | }; 15 | use tracing::{debug, trace}; 16 | 17 | /// Error type for the forwarder. 18 | #[derive(Error, Debug)] 19 | pub(super) enum Error { 20 | #[error(transparent)] 21 | Io(#[from] std::io::Error), 22 | #[error("Invalid host: {0}")] 23 | Host(#[from] std::str::Utf8Error), 24 | } 25 | 26 | /// Bind a UDP socket with the same address family as the given target, 27 | /// and return the bound socket and the matched target address. 
28 | /// Note that we don't connect or send the socket here. 29 | #[inline] 30 | async fn bind_for_target(target: (&str, u16)) -> Result<(UdpSocket, SocketAddr), Error> { 31 | let targets = lookup_host(target).await?; 32 | let mut last_err = None; 33 | for target in targets { 34 | let socket = match if target.is_ipv4() { 35 | UdpSocket::bind(("0.0.0.0", 0)).await 36 | } else { 37 | UdpSocket::bind(("::", 0)).await 38 | } { 39 | Ok(socket) => socket, 40 | Err(e) => { 41 | last_err = Some(e); 42 | continue; 43 | } 44 | }; 45 | // `expect`: at this point `listener` should be bound. Otherwise, it's a bug. 46 | let local_addr = socket 47 | .local_addr() 48 | .expect("Failed to get local address of UDP socket (this is a bug)"); 49 | debug!("bound to {local_addr}"); 50 | return Ok((socket, target)); 51 | } 52 | Err(last_err 53 | .unwrap_or_else(|| { 54 | std::io::Error::new( 55 | std::io::ErrorKind::InvalidInput, 56 | "could not resolve to any address", 57 | ) 58 | }) 59 | .into()) 60 | } 61 | 62 | /// Sit on a random port, send a UDP datagram to the given target, 63 | /// and wait for a response in the following `UDP_PRUNE_TIMEOUT` seconds. 
64 | #[tracing::instrument(skip_all, level = "debug", fields(flow_id = %format_args!("{:08x}", first_datagram_frame.flow_id)))] 65 | pub(super) async fn udp_forward_on( 66 | first_datagram_frame: Datagram, 67 | mut datagram_rx: mpsc::Receiver, 68 | datagram_tx: mpsc::Sender, 69 | ) -> Result<(), Error> { 70 | trace!("got datagram frame: {first_datagram_frame:?}"); 71 | let Datagram { 72 | target_host: rhost, 73 | target_port: rport, 74 | flow_id, 75 | data, 76 | } = first_datagram_frame; 77 | let rhost_str = std::str::from_utf8(&rhost)?; 78 | let (socket, target) = bind_for_target((rhost_str, rport)).await?; 79 | socket.send_to(&data, target).await?; 80 | trace!("sent UDP packet to {target}"); 81 | loop { 82 | // Reset this timeout each time we see traffic 83 | let this_round_timeout = tokio::time::sleep(config::UDP_PRUNE_TIMEOUT); 84 | let mut buf = vec![0; config::MAX_UDP_PACKET_SIZE]; 85 | tokio::select! { 86 | // Check if the socket has received a datagram 87 | Ok((len, addr)) = socket.recv_from(&mut buf) => { 88 | buf.truncate(len); 89 | trace!("got UDP response from {addr}"); 90 | let frame = Datagram { 91 | target_host: rhost.dupe(), 92 | target_port: rport, 93 | flow_id, 94 | data: buf.into(), 95 | }; 96 | if let Err(error) = datagram_tx.try_send(frame) { 97 | match error { 98 | mpsc::error::TrySendError::Closed(_) => { 99 | // The mux loop has exited 100 | trace!("UDP forwarder exiting due to closed mux"); 101 | break; 102 | } 103 | mpsc::error::TrySendError::Full(_) => { 104 | // The channel is full, so just discard the datagram 105 | debug!("UDP forwarder channel is full"); 106 | } 107 | } 108 | } 109 | } 110 | // Check if the channel has received a datagram 111 | Some(datagram_frame) = datagram_rx.recv() => { 112 | // If this returns `None`, the mux loop has exited 113 | // I don't want to handle this case here because 114 | // the timeout branch will handle it for us anyway. 
115 | let target = ( 116 | std::str::from_utf8(&datagram_frame.target_host)?, 117 | datagram_frame.target_port, 118 | ); 119 | trace!("got new datagram frame: {datagram_frame:?} for {target:?}"); 120 | socket.send_to(&datagram_frame.data, target).await?; 121 | } 122 | // Check if the timeout has expired 123 | () = this_round_timeout => { 124 | trace!("UDP prune timeout expired"); 125 | break; 126 | } 127 | } 128 | } 129 | debug!("UDP forwarding finished"); 130 | Ok(()) 131 | } 132 | 133 | /// Start a TCP forwarding server on the given listener. 134 | /// 135 | /// This forwarder is trivial: it just pipes the TCP stream to and from the 136 | /// channel. 137 | /// 138 | /// # Errors 139 | /// It carries the errors from the underlying TCP or channel IO functions. 140 | #[tracing::instrument(skip_all, level = "debug")] 141 | pub(super) async fn tcp_forwarder_on_channel(channel: MuxStream) -> Result<(), Error> { 142 | let rhost = std::str::from_utf8(&channel.dest_host)?; 143 | let rport = channel.dest_port; 144 | trace!("attempting TCP connect to {rhost} port={rport}"); 145 | let mut rstream = TcpStream::connect((rhost, rport)).await?; 146 | // Here `rstream` should be connected. 
Pass the error (unlikely) otherwise 147 | debug!("TCP forwarding to {}", rstream.peer_addr()?); 148 | channel.into_copy_bidirectional(&mut rstream).await?; 149 | trace!("TCP forwarding finished"); 150 | Ok(()) 151 | } 152 | 153 | #[cfg(test)] 154 | mod tests { 155 | use super::*; 156 | use bytes::Bytes; 157 | 158 | #[tokio::test] 159 | async fn test_bind_and_send_v4() { 160 | crate::tests::setup_logging(); 161 | let target_sock = UdpSocket::bind(("127.0.0.1", 0)).await.unwrap(); 162 | let target_addr = target_sock.local_addr().unwrap(); 163 | let (socket, target) = bind_for_target(("127.0.0.1", target_addr.port())) 164 | .await 165 | .unwrap(); 166 | assert_eq!(target, target_addr); 167 | socket.send_to(b"hello", target).await.unwrap(); 168 | let mut buf = vec![0; 5]; 169 | let (len, addr) = target_sock.recv_from(&mut buf).await.unwrap(); 170 | assert_eq!(len, 5); 171 | assert_eq!(addr.port(), socket.local_addr().unwrap().port()); 172 | assert_eq!(buf, b"hello"); 173 | target_sock.send_to(b"world", addr).await.unwrap(); 174 | socket.recv(&mut buf).await.unwrap(); 175 | assert_eq!(buf, b"world"); 176 | } 177 | 178 | #[tokio::test] 179 | async fn test_bind_and_send_v6() { 180 | crate::tests::setup_logging(); 181 | let target_sock = UdpSocket::bind(("::1", 0)).await.unwrap(); 182 | let target_addr = target_sock.local_addr().unwrap(); 183 | let (socket, target) = bind_for_target(("::1", target_addr.port())).await.unwrap(); 184 | assert_eq!(target, target_addr); 185 | socket.send_to(b"hello", target).await.unwrap(); 186 | let mut buf = vec![0; 5]; 187 | let (len, addr) = target_sock.recv_from(&mut buf).await.unwrap(); 188 | assert_eq!(len, 5); 189 | assert_eq!(addr.port(), socket.local_addr().unwrap().port()); 190 | assert_eq!(buf, b"hello"); 191 | target_sock.send_to(b"world", addr).await.unwrap(); 192 | socket.recv(&mut buf).await.unwrap(); 193 | assert_eq!(buf, b"world"); 194 | } 195 | 196 | #[tokio::test] 197 | async fn test_udp_forward_to_v4() { 198 | 
crate::tests::setup_logging(); 199 | let target_sock = UdpSocket::bind(("127.0.0.1", 0)).await.unwrap(); 200 | let target_addr = target_sock.local_addr().unwrap(); 201 | let (recv_tx, mut recv_rx) = tokio::sync::mpsc::channel(4); 202 | let (send_tx, send_rx) = tokio::sync::mpsc::channel(4); 203 | let datagram_frame = Datagram { 204 | flow_id: 0, 205 | target_host: Bytes::from_static(b"127.0.0.1"), 206 | target_port: target_addr.port(), 207 | data: Bytes::from_static(b"hello"), 208 | }; 209 | drop(send_tx); 210 | let forwarder = tokio::spawn(udp_forward_on(datagram_frame, send_rx, recv_tx)); 211 | let mut buf = vec![0; 5]; 212 | let (len, addr) = target_sock.recv_from(&mut buf).await.unwrap(); 213 | assert_eq!(len, 5); 214 | assert_eq!(buf, b"hello"); 215 | target_sock.send_to(b"test 1", addr).await.unwrap(); 216 | target_sock.send_to(b"test 2", addr).await.unwrap(); 217 | target_sock.send_to(b"test 3", addr).await.unwrap(); 218 | forwarder.await.unwrap().unwrap(); 219 | let datagram_frame: Datagram = recv_rx.recv().await.unwrap(); 220 | assert_eq!(*datagram_frame.data, *b"test 1"); 221 | let datagram_frame = recv_rx.recv().await.unwrap(); 222 | assert_eq!(*datagram_frame.data, *b"test 2"); 223 | let datagram_frame = recv_rx.recv().await.unwrap(); 224 | assert_eq!(*datagram_frame.data, *b"test 3"); 225 | } 226 | 227 | #[tokio::test] 228 | async fn test_udp_forward_to_v6() { 229 | crate::tests::setup_logging(); 230 | let target_sock = UdpSocket::bind(("::1", 0)).await.unwrap(); 231 | let target_addr = target_sock.local_addr().unwrap(); 232 | let (recv_tx, mut recv_rx) = tokio::sync::mpsc::channel(4); 233 | let (send_tx, send_rx) = tokio::sync::mpsc::channel(4); 234 | let datagram_frame = Datagram { 235 | flow_id: 0, 236 | target_host: Bytes::from_static(b"::1"), 237 | target_port: target_addr.port(), 238 | data: Bytes::from_static(b"hello"), 239 | }; 240 | drop(send_tx); 241 | let forwarder = tokio::spawn(udp_forward_on(datagram_frame, send_rx, recv_tx)); 242 | let 
mut buf = vec![0; 5]; 243 | let (len, addr) = target_sock.recv_from(&mut buf).await.unwrap(); 244 | assert_eq!(len, 5); 245 | assert_eq!(buf, b"hello"); 246 | target_sock.send_to(b"test 1", addr).await.unwrap(); 247 | target_sock.send_to(b"test 2", addr).await.unwrap(); 248 | target_sock.send_to(b"test 3", addr).await.unwrap(); 249 | forwarder.await.unwrap().unwrap(); 250 | let datagram_frame = recv_rx.recv().await.unwrap(); 251 | assert_eq!(*datagram_frame.data, *b"test 1"); 252 | let datagram_frame = recv_rx.recv().await.unwrap(); 253 | assert_eq!(*datagram_frame.data, *b"test 2"); 254 | let datagram_frame = recv_rx.recv().await.unwrap(); 255 | assert_eq!(*datagram_frame.data, *b"test 3"); 256 | } 257 | } 258 | -------------------------------------------------------------------------------- /src/server/io_with_timeout.rs: -------------------------------------------------------------------------------- 1 | use penguin_mux::timing::OptionalDuration; 2 | use std::{ 3 | pin::Pin, 4 | task::{Poll, ready}, 5 | }; 6 | use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; 7 | 8 | /// A wrapper around an `AsyncRead` with a read timeout. 
9 | pub struct IoWithTimeout { 10 | stream: S, 11 | timeout: OptionalDuration, 12 | deadline: Pin + Send + 'static>>, 13 | } 14 | 15 | impl IoWithTimeout { 16 | pub fn new(stream: S, timeout: OptionalDuration) -> Self { 17 | let deadline = Box::pin(timeout.sleep()); 18 | IoWithTimeout { 19 | stream, 20 | timeout, 21 | deadline, 22 | } 23 | } 24 | 25 | pub fn into_inner(self) -> S { 26 | self.stream 27 | } 28 | 29 | fn reset(&mut self) { 30 | self.deadline = Box::pin(self.timeout.sleep()); 31 | } 32 | 33 | fn poll_elapsed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> { 34 | self.deadline.as_mut().poll(cx) 35 | } 36 | } 37 | 38 | impl AsyncRead for IoWithTimeout { 39 | fn poll_read( 40 | mut self: Pin<&mut Self>, 41 | cx: &mut std::task::Context<'_>, 42 | buf: &mut ReadBuf<'_>, 43 | ) -> Poll> { 44 | if self.poll_elapsed(cx).is_ready() { 45 | return Poll::Ready(Err(std::io::ErrorKind::TimedOut.into())); 46 | } 47 | let this = self.get_mut(); 48 | let stream = Pin::new(&mut this.stream); 49 | let result = ready!(stream.poll_read(cx, buf)); 50 | // If the read operation is `Ready`, reset the deadline 51 | this.reset(); 52 | Poll::Ready(result) 53 | } 54 | } 55 | 56 | impl AsyncWrite for IoWithTimeout { 57 | fn poll_write( 58 | self: Pin<&mut Self>, 59 | cx: &mut std::task::Context<'_>, 60 | buf: &[u8], 61 | ) -> Poll> { 62 | let this = self.get_mut(); 63 | let stream = Pin::new(&mut this.stream); 64 | let result = ready!(stream.poll_write(cx, buf)); 65 | // If the write operation is `Ready`, reset the deadline 66 | this.reset(); 67 | Poll::Ready(result) 68 | } 69 | 70 | fn poll_flush( 71 | self: Pin<&mut Self>, 72 | cx: &mut std::task::Context<'_>, 73 | ) -> Poll> { 74 | let this = self.get_mut(); 75 | let stream = Pin::new(&mut this.stream); 76 | let result = ready!(stream.poll_flush(cx)); 77 | // If the flush operation is `Ready`, reset the deadline 78 | this.reset(); 79 | Poll::Ready(result) 80 | } 81 | 82 | fn poll_shutdown( 83 | self: Pin<&mut Self>, 84 
| cx: &mut std::task::Context<'_>, 85 | ) -> Poll> { 86 | let this = self.get_mut(); 87 | let stream = Pin::new(&mut this.stream); 88 | let result = ready!(stream.poll_shutdown(cx)); 89 | // If the shutdown operation is `Ready`, reset the deadline 90 | this.reset(); 91 | Poll::Ready(result) 92 | } 93 | } 94 | 95 | #[cfg(test)] 96 | mod tests { 97 | use super::*; 98 | use std::time::Duration; 99 | use tokio::io::{AsyncReadExt, AsyncWriteExt}; 100 | 101 | #[tokio::test] 102 | async fn test_read_will_timeout() { 103 | let (reader, mut writer) = tokio::io::simplex(1024); 104 | let mut io = IoWithTimeout::new(reader, Duration::from_millis(100).into()); 105 | 106 | tokio::spawn(async move { 107 | // Delay the write more than the timeout 108 | tokio::time::sleep(Duration::from_secs(1)).await; 109 | let _ = writer.write_all(b"hello").await; 110 | }); 111 | 112 | let mut buf = vec![0; 5]; 113 | let result = io.read_exact(&mut buf).await; 114 | assert!(result.is_err()); 115 | } 116 | 117 | #[tokio::test] 118 | async fn test_read_will_timeout_long() { 119 | let (reader, mut writer) = tokio::io::simplex(1024); 120 | let mut io = IoWithTimeout::new(reader, Duration::from_secs(2).into()); 121 | 122 | tokio::spawn(async move { 123 | // Delay the write more than the timeout 124 | tokio::time::sleep(Duration::from_secs(3)).await; 125 | let _ = writer.write_all(b"hello").await; 126 | }); 127 | 128 | let mut buf = vec![0; 5]; 129 | let result = io.read_exact(&mut buf).await; 130 | assert!(result.is_err()); 131 | } 132 | 133 | #[tokio::test] 134 | async fn test_read_can_succeed() { 135 | let (reader, mut writer) = tokio::io::simplex(1024); 136 | let mut io = IoWithTimeout::new(reader, Duration::from_secs(1).into()); 137 | 138 | tokio::spawn(async move { 139 | // Write before the timeout 140 | let _ = writer.write_all(b"hello").await; 141 | }); 142 | 143 | let mut buf = vec![0; 5]; 144 | let result = io.read_exact(&mut buf).await; 145 | assert!(result.is_ok()); 146 | assert_eq!(&buf, 
b"hello"); 147 | } 148 | 149 | #[tokio::test] 150 | async fn test_write_also_reset_deadline() { 151 | let (us, mut task) = tokio::io::duplex(1024); 152 | let mut io = IoWithTimeout::new(us, Duration::from_secs(1).into()); 153 | 154 | tokio::spawn(async move { 155 | let mut buf = vec![0; 5]; 156 | // Read from our end 157 | let _ = task.read_exact(&mut buf).await; 158 | // Delay a bit more 159 | tokio::time::sleep(Duration::from_millis(600)).await; 160 | // Write to the writer 161 | let _ = task.write_all(b"hello").await; 162 | }); 163 | 164 | let mut buf = vec![0; 5]; 165 | // Delay a little 166 | tokio::time::sleep(Duration::from_millis(500)).await; 167 | // Write to reset the deadline 168 | let result = io.write_all(b"hello").await; 169 | assert!(result.is_ok()); 170 | // Now read and check if it succeeds 171 | let result = io.read_exact(&mut buf).await; 172 | assert!(result.is_ok()); 173 | assert_eq!(&buf, b"hello"); 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /src/server/mod.rs: -------------------------------------------------------------------------------- 1 | //! Penguin server. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | #[cfg(feature = "acme")] 6 | pub mod acme; 7 | mod forwarder; 8 | mod io_with_timeout; 9 | mod service; 10 | mod websocket; 11 | 12 | use self::service::State; 13 | use crate::arg::ServerArgs; 14 | #[cfg(unix)] 15 | use crate::tls::reload_tls_identity; 16 | use crate::tls::{MaybeTlsStream, TlsIdentity, TlsIdentityInner, make_tls_identity}; 17 | use hyper_util::rt::TokioIo; 18 | use hyper_util::rt::tokio::TokioExecutor; 19 | use hyper_util::server::conn::auto; 20 | use penguin_mux::Dupe; 21 | use std::net::SocketAddr; 22 | use std::sync::Arc; 23 | use thiserror::Error; 24 | use tokio::net::{TcpListener, TcpStream}; 25 | use tokio::task::JoinSet; 26 | use tokio_tungstenite::WebSocketStream; 27 | use tracing::{debug, error, info, trace}; 28 | 29 | type WebSocket = WebSocketStream>; 30 | 31 | /// Server Errors 32 | #[derive(Debug, Error)] 33 | pub enum Error { 34 | #[error("Invalid listening host: {0}")] 35 | InvalidHost(#[from] std::net::AddrParseError), 36 | #[error(transparent)] 37 | Tls(#[from] crate::tls::Error), 38 | #[cfg(unix)] 39 | #[error("Cannot register signal handler: {0}")] 40 | Signal(std::io::Error), 41 | #[error("HTTP server I/O error: {0}")] 42 | Io(#[from] std::io::Error), 43 | #[error("TLS error: {0}")] 44 | #[cfg(feature = "nativetls")] 45 | NativeTls(#[from] tokio_native_tls::native_tls::Error), 46 | #[cfg(feature = "acme")] 47 | #[error(transparent)] 48 | Acme(#[from] acme::Error), 49 | } 50 | 51 | /// Check if TLS is enabled. 52 | /// If so, create a `TlsIdentity` and start relevant tasks 53 | async fn check_start_tls(args: &'static ServerArgs) -> Result, Error> { 54 | if let Some(tls_key) = &args.tls_key { 55 | // `expect`: `clap` ensures that both `--tls-cert` and `--tls-key` are 56 | // specified if either is specified. 
57 | let tls_cert = args 58 | .tls_cert 59 | .as_ref() 60 | .expect("`tls_cert` is `None` (this is a bug)"); 61 | trace!("Enabling TLS"); 62 | let tls_config = make_tls_identity(tls_cert, tls_key, args.tls_ca.as_deref()).await?; 63 | #[cfg(unix)] 64 | register_signal_handler(tls_config.dupe(), tls_cert, tls_key, args.tls_ca.as_deref())?; 65 | return Ok(Some(tls_config)); 66 | } 67 | // `clap` ensures that tls-key or tls-domain are mutually exclusive. 68 | #[cfg(feature = "acme")] 69 | if !args.tls_domain.is_empty() { 70 | trace!("Enabling TLS using ACME"); 71 | let acme_client = acme::Client::populate_or_get(args).await?; 72 | let tls_config = acme_client.get_tls_config_spawn_renewal(); 73 | return Ok(Some(tls_config)); 74 | } 75 | trace!("TLS is not enabled"); 76 | Ok(None) 77 | } 78 | 79 | #[tracing::instrument(level = "trace")] 80 | pub async fn server_main(args: &'static ServerArgs) -> Result<(), Error> { 81 | let state = State::new( 82 | args.backend.as_ref(), 83 | args.ws_psk.as_ref(), 84 | &args.not_found_resp, 85 | args.obfs, 86 | args.reverse, 87 | args.timeout, 88 | args.timeout, 89 | )?; 90 | let sockaddrs = arg_to_sockaddrs(args)?; 91 | let mut listening_tasks = JoinSet::new(); 92 | if let Some(tls_config) = check_start_tls(args).await? 
{ 93 | for sockaddr in sockaddrs { 94 | let listener = TcpListener::bind(sockaddr).await?; 95 | let actual_addr = listener.local_addr()?; 96 | info!("Listening on wss://{actual_addr}/ws"); 97 | listening_tasks.spawn(run_listener( 98 | listener, 99 | Some(tls_config.dupe()), 100 | state.dupe(), 101 | )); 102 | } 103 | } else { 104 | for sockaddr in sockaddrs { 105 | let listener = TcpListener::bind(sockaddr).await?; 106 | let actual_addr = listener.local_addr()?; 107 | info!("Listening on ws://{actual_addr}/ws"); 108 | listening_tasks.spawn(run_listener(listener, None, state.dupe())); 109 | } 110 | } 111 | while let Some(res) = listening_tasks.join_next().await { 112 | if let Err(err) = res { 113 | assert!(!err.is_panic(), "Panic in a listener: {err}"); 114 | error!("Listener finished with error: {err}"); 115 | } 116 | } 117 | Ok(()) 118 | } 119 | 120 | /// Run a signal handler task to reload the TLS certificate. 121 | #[cfg(unix)] 122 | #[inline] 123 | fn register_signal_handler( 124 | tls_config: crate::tls::TlsIdentity, 125 | tls_cert: &'static str, 126 | tls_key: &'static str, 127 | tls_ca: Option<&'static str>, 128 | ) -> Result<(), Error> { 129 | let mut sigusr1 = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::user_defined1()) 130 | .map_err(Error::Signal)?; 131 | // This `Future` does not fail, so we can ignore the `Result`. 132 | tokio::spawn(async move { 133 | while sigusr1.recv().await.is_some() { 134 | info!("Reloading TLS certificate"); 135 | if let Err(err) = reload_tls_identity(&tls_config, tls_cert, tls_key, tls_ca).await { 136 | error!("Cannot reload TLS certificate: {err}"); 137 | } 138 | } 139 | }); 140 | Ok(()) 141 | } 142 | 143 | /// Create a list of `SocketAddr`s from the command-line arguments on which to listen. 144 | fn arg_to_sockaddrs(arg: &ServerArgs) -> Result, Error> { 145 | // `expect`: `clap` ensures that `--port` has at least one element. 
146 | let last_port = arg.port.last().expect("`port` is empty (this is a bug)"); 147 | assert!(!arg.host.is_empty(), "`host` is empty (this is a bug)"); 148 | // Fill the rest of `port` with the last element. 149 | let ports = arg.port.iter().chain(std::iter::repeat(last_port)); 150 | // Fills the rest of `port` with the last element. 151 | arg.host 152 | .iter() 153 | .zip(ports) 154 | .map(|(host, port)| { 155 | let host = crate::parse_remote::remove_brackets(host); 156 | let sockaddr: SocketAddr = (host.parse::()?, *port).into(); 157 | Ok(sockaddr) 158 | }) 159 | .collect() 160 | } 161 | 162 | /// Runs a listener. 163 | #[tracing::instrument(skip_all, level = "debug", fields(tls = %tls_config.is_some()))] 164 | async fn run_listener( 165 | listener: TcpListener, 166 | tls_config: Option, 167 | state: State<'static, hyper::body::Incoming>, 168 | ) { 169 | loop { 170 | let new_state = state.dupe(); 171 | let (stream, peer) = match listener.accept().await { 172 | Ok((stream, peer)) => (stream, peer), 173 | Err(err) => { 174 | error!("Accept error: {err}"); 175 | continue; 176 | } 177 | }; 178 | debug!("accepted connection from {peer}"); 179 | if let Some(tls_config) = &tls_config { 180 | tokio::spawn(serve_connection_tls( 181 | stream, 182 | new_state, 183 | tls_config.load_full(), 184 | )); 185 | } else { 186 | tokio::spawn(serve_connection(MaybeTlsStream::Plain(stream), new_state)); 187 | } 188 | } 189 | } 190 | 191 | /// Serves a single connection from a client with TLS, ignoring errors. 
192 | async fn serve_connection_tls( 193 | stream: TcpStream, 194 | state: State<'static, hyper::body::Incoming>, 195 | tls_config: Arc, 196 | ) { 197 | let tls_timeout = state.tls_timeout; 198 | #[cfg(feature = "__rustls")] 199 | let stream_future = tokio_rustls::TlsAcceptor::from(tls_config).accept(stream); 200 | #[cfg(feature = "nativetls")] 201 | let stream_future = tls_config.accept(stream); 202 | 203 | let stream = state.tls_timeout.timeout(stream_future).await; 204 | 205 | match stream { 206 | Ok(Ok(stream)) => { 207 | serve_connection(stream.into(), state).await; 208 | } 209 | Ok(Err(err)) => { 210 | error!("TLS handshake error: {err}"); 211 | } 212 | Err(_) => { 213 | error!("TLS handshake timed out after {tls_timeout}"); 214 | } 215 | } 216 | } 217 | 218 | /// Serves a single connection from a client, ignoring errors. 219 | #[tracing::instrument(skip_all, level = "debug")] 220 | async fn serve_connection( 221 | stream: MaybeTlsStream, 222 | state: State<'static, hyper::body::Incoming>, 223 | ) { 224 | let stream_with_timeout = io_with_timeout::IoWithTimeout::new(stream, state.http_timeout); 225 | let hyper_io = TokioIo::new(stream_with_timeout); 226 | let exec = auto::Builder::new(TokioExecutor::new()); 227 | let conn = exec.serve_connection_with_upgrades(hyper_io, state); 228 | let conn = assert_send(conn); 229 | if let Err(err) = conn.await { 230 | error!("HTTP connection error: {err}"); 231 | } 232 | } 233 | 234 | /// Workaround at 235 | fn assert_send<'u, R>( 236 | fut: impl 'u + Send + Future, 237 | ) -> impl 'u + Send + Future { 238 | fut 239 | } 240 | 241 | #[cfg(test)] 242 | mod tests { 243 | use super::*; 244 | 245 | fn get_server_args(host: Vec, port: Vec) -> ServerArgs { 246 | ServerArgs { 247 | host, 248 | port, 249 | ..Default::default() 250 | } 251 | } 252 | 253 | /// Test `arg_to_sockaddrs` with no hosts and no ports. 
254 | #[test] 255 | #[should_panic(expected = "`port` is empty (this is a bug)")] 256 | fn test_arg_to_sockaddrs_empty() { 257 | crate::tests::setup_logging(); 258 | let args = get_server_args(vec![], vec![]); 259 | let _ = arg_to_sockaddrs(&args).unwrap(); 260 | } 261 | 262 | /// Test `arg_to_sockaddrs` with no hosts and one port. 263 | #[test] 264 | #[should_panic(expected = "`host` is empty (this is a bug)")] 265 | fn test_arg_to_sockaddrs_empty_host() { 266 | crate::tests::setup_logging(); 267 | let args = get_server_args(vec![], vec![1234]); 268 | let _ = arg_to_sockaddrs(&args).unwrap(); 269 | } 270 | 271 | /// Test `arg_to_sockaddrs` with one host and no ports. 272 | #[test] 273 | #[should_panic(expected = "`port` is empty (this is a bug)")] 274 | fn test_arg_to_sockaddrs_empty_port() { 275 | crate::tests::setup_logging(); 276 | let args = get_server_args(vec!["::".to_string()], vec![]); 277 | let _ = arg_to_sockaddrs(&args).unwrap(); 278 | } 279 | 280 | /// Test `arg_to_sockaddrs` with a single host and a single port. 281 | #[test] 282 | fn test_arg_to_sockaddrs_single_v4() { 283 | crate::tests::setup_logging(); 284 | let args = get_server_args(vec!["127.0.0.1".to_string()], vec![9999]); 285 | let sockaddrs = arg_to_sockaddrs(&args).unwrap(); 286 | assert_eq!(sockaddrs.len(), 1); 287 | assert_eq!( 288 | sockaddrs[0].ip(), 289 | std::net::IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1)) 290 | ); 291 | assert_eq!(sockaddrs[0].port(), 9999); 292 | } 293 | 294 | /// Test `arg_to_sockaddrs` with a single host and a single port. 
295 | #[test] 296 | fn test_arg_to_sockaddrs_single_v6() { 297 | crate::tests::setup_logging(); 298 | let args = get_server_args(vec!["[::1]".to_string()], vec![1532]); 299 | let sockaddrs = arg_to_sockaddrs(&args).unwrap(); 300 | assert_eq!(sockaddrs.len(), 1); 301 | assert_eq!( 302 | sockaddrs[0].ip(), 303 | std::net::IpAddr::V6(std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)) 304 | ); 305 | assert_eq!(sockaddrs[0].port(), 1532); 306 | } 307 | 308 | /// Test `arg_to_sockaddrs` with several hosts and one port. 309 | #[test] 310 | fn test_arg_to_sockaddrs_multi_v4() { 311 | crate::tests::setup_logging(); 312 | let args = get_server_args( 313 | vec![ 314 | "127.0.0.1".to_string(), 315 | "0.0.0.0".to_string(), 316 | "[::]".to_string(), 317 | "::1".to_string(), 318 | ], 319 | vec![1233], 320 | ); 321 | let sockaddrs = arg_to_sockaddrs(&args).unwrap(); 322 | assert_eq!(sockaddrs.len(), 4); 323 | assert_eq!( 324 | sockaddrs[0].ip(), 325 | std::net::IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1)) 326 | ); 327 | assert_eq!(sockaddrs[0].port(), 1233); 328 | assert_eq!( 329 | sockaddrs[1].ip(), 330 | std::net::IpAddr::V4(std::net::Ipv4Addr::new(0, 0, 0, 0)) 331 | ); 332 | assert_eq!(sockaddrs[1].port(), 1233); 333 | assert_eq!( 334 | sockaddrs[2].ip(), 335 | std::net::IpAddr::V6(std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)) 336 | ); 337 | assert_eq!(sockaddrs[2].port(), 1233); 338 | assert_eq!( 339 | sockaddrs[3].ip(), 340 | std::net::IpAddr::V6(std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)) 341 | ); 342 | assert_eq!(sockaddrs[3].port(), 1233); 343 | } 344 | } 345 | -------------------------------------------------------------------------------- /src/server/websocket.rs: -------------------------------------------------------------------------------- 1 | //! Penguin server `WebSocket` listener. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use super::WebSocket; 6 | use super::forwarder::tcp_forwarder_on_channel; 7 | use super::forwarder::udp_forward_on; 8 | use crate::config; 9 | use penguin_mux::{Datagram, Dupe, Multiplexor}; 10 | use tokio::{sync::mpsc, task::JoinSet}; 11 | use tracing::{debug, error, trace, warn}; 12 | 13 | #[cfg(feature = "nohash")] 14 | use nohash_hasher::IntMap; 15 | #[cfg(not(feature = "nohash"))] 16 | use std::collections::HashMap as IntMap; 17 | 18 | /// Multiplex the `WebSocket` connection and handle the forwarding requests. 19 | #[tracing::instrument(skip(ws_stream), level = "debug")] 20 | pub async fn handle_websocket(ws_stream: WebSocket, reverse: bool) { 21 | let options = penguin_mux::config::Options::new().bind_buffer_size(if reverse { 22 | config::BIND_BUFFER_SIZE 23 | } else { 24 | 0 25 | }); 26 | let mux = Multiplexor::new(ws_stream, Some(options), None); 27 | let mut udp_clients: IntMap> = IntMap::default(); 28 | debug!("WebSocket connection established"); 29 | let mut jobs = JoinSet::new(); 30 | // Channel for listeners to send UDP datagrams to the main loop 31 | let (datagram_send_tx, mut datagram_send_rx) = 32 | mpsc::channel::(config::INCOMING_DATAGRAM_BUFFER_SIZE); 33 | loop { 34 | trace!("server WebSocket loop"); 35 | tokio::select! 
{ 36 | // Check if any of the jobs have finished 37 | Some(result) = jobs.join_next() => { 38 | match result { 39 | Ok(Ok(())) => {} 40 | Ok(Err(err)) => { 41 | warn!("Forwarder finished with error: {err}"); 42 | } 43 | Err(err) => { 44 | assert!(!err.is_panic(), "Panic in a forwarder: {err}"); 45 | } 46 | } 47 | } 48 | // Check if the multiplexor has received a new stream request 49 | Ok(result) = mux.accept_stream_channel() => { 50 | jobs.spawn(tcp_forwarder_on_channel(result)); 51 | } 52 | // Check if the multiplexor has received a UDP datagram 53 | Ok(datagram_frame) = mux.get_datagram() => { 54 | let flow_id = datagram_frame.flow_id; 55 | if let Some(sender) = udp_clients.get_mut(&flow_id) { 56 | sender.try_send(datagram_frame).unwrap_or_else(|err| { 57 | match err { 58 | mpsc::error::TrySendError::Closed(_) => { 59 | // This client has been pruned, so we should 60 | // remove it from the map and hopefully 61 | // the client will try again. 62 | trace!("UDP client {flow_id} has been pruned"); 63 | udp_clients.remove(&flow_id); 64 | } 65 | mpsc::error::TrySendError::Full(_) => { 66 | // The channel is full, so just discard the datagram 67 | trace!("UDP client {flow_id} has a full channel"); 68 | } 69 | } 70 | }); 71 | } else { 72 | let (sender, receiver) = mpsc::channel::(config::INCOMING_DATAGRAM_BUFFER_SIZE); 73 | udp_clients.insert(flow_id, sender); 74 | jobs.spawn(udp_forward_on(datagram_frame, receiver, datagram_send_tx.dupe())); 75 | } 76 | } 77 | // Check if any of the listeners have sent a UDP datagram 78 | Some(datagram_frame) = datagram_send_rx.recv() => { 79 | mux.send_datagram(datagram_frame).await.unwrap_or_else( 80 | |err| error!("Failed to send datagram: {err}"), 81 | ); 82 | } 83 | else => { 84 | // The multiplexor has closed for some reason 85 | break; 86 | } 87 | } 88 | } 89 | debug!("WebSocket connection closed"); 90 | jobs.shutdown().await; 91 | } 92 | -------------------------------------------------------------------------------- 
/src/tls/maybe_tls.rs: -------------------------------------------------------------------------------- 1 | use super::TlsStream; 2 | use std::{ 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; 7 | 8 | /// A stream that may be encrypted with TLS 9 | // This lint is a false positive because `T` is typically `TcpStream` which is not a zero-sized type. 10 | #[allow(clippy::large_enum_variant)] 11 | pub enum MaybeTlsStream { 12 | Tls(TlsStream), 13 | Plain(T), 14 | } 15 | 16 | impl AsyncRead for MaybeTlsStream { 17 | fn poll_read( 18 | self: Pin<&mut Self>, 19 | cx: &mut Context<'_>, 20 | buf: &mut ReadBuf<'_>, 21 | ) -> Poll> { 22 | match self.get_mut() { 23 | MaybeTlsStream::Tls(stream) => Pin::new(stream).poll_read(cx, buf), 24 | MaybeTlsStream::Plain(stream) => Pin::new(stream).poll_read(cx, buf), 25 | } 26 | } 27 | } 28 | 29 | impl AsyncWrite for MaybeTlsStream { 30 | fn poll_write( 31 | self: Pin<&mut Self>, 32 | cx: &mut Context<'_>, 33 | buf: &[u8], 34 | ) -> Poll> { 35 | match self.get_mut() { 36 | MaybeTlsStream::Tls(stream) => Pin::new(stream).poll_write(cx, buf), 37 | MaybeTlsStream::Plain(stream) => Pin::new(stream).poll_write(cx, buf), 38 | } 39 | } 40 | 41 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 42 | match self.get_mut() { 43 | MaybeTlsStream::Tls(stream) => Pin::new(stream).poll_flush(cx), 44 | MaybeTlsStream::Plain(stream) => Pin::new(stream).poll_flush(cx), 45 | } 46 | } 47 | 48 | fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 49 | match self.get_mut() { 50 | MaybeTlsStream::Tls(stream) => Pin::new(stream).poll_shutdown(cx), 51 | MaybeTlsStream::Plain(stream) => Pin::new(stream).poll_shutdown(cx), 52 | } 53 | } 54 | } 55 | 56 | #[cfg(feature = "__rustls")] 57 | impl From> for MaybeTlsStream { 58 | fn from(stream: tokio_rustls::server::TlsStream) -> Self { 59 | MaybeTlsStream::Tls(tokio_rustls::TlsStream::Server(stream)) 60 | } 61 | } 62 | 63 | 
#[cfg(feature = "__rustls")] 64 | impl From> for MaybeTlsStream { 65 | fn from(stream: tokio_rustls::client::TlsStream) -> Self { 66 | MaybeTlsStream::Tls(tokio_rustls::TlsStream::Client(stream)) 67 | } 68 | } 69 | 70 | #[cfg(feature = "nativetls")] 71 | impl From> for MaybeTlsStream { 72 | fn from(stream: tokio_native_tls::TlsStream) -> Self { 73 | MaybeTlsStream::Tls(stream) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/tls/mod.rs: -------------------------------------------------------------------------------- 1 | //! Common TLS functionalities. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | mod maybe_tls; 6 | #[cfg(feature = "nativetls")] 7 | mod native; 8 | #[cfg(feature = "__rustls")] 9 | mod rustls; 10 | 11 | #[cfg(all(feature = "nativetls", feature = "acme"))] 12 | use self::native::make_server_config_from_rcgen_pem; 13 | #[cfg(all(feature = "__rustls", feature = "acme"))] 14 | use self::rustls::make_server_config_from_rcgen_pem; 15 | #[cfg(feature = "__rustls")] 16 | use ::rustls::pki_types::InvalidDnsNameError; 17 | use arc_swap::ArcSwap; 18 | use std::sync::Arc; 19 | use thiserror::Error; 20 | 21 | #[cfg(all(feature = "__rustls", feature = "server"))] 22 | pub use self::rustls::{HyperConnector, make_hyper_connector}; 23 | #[allow(clippy::module_name_repetitions)] 24 | #[cfg(feature = "__rustls")] 25 | pub use self::rustls::{TlsIdentityInner, make_client_config, make_server_config}; 26 | #[cfg(all(feature = "nativetls", feature = "server"))] 27 | pub use native::{HyperConnector, make_hyper_connector}; 28 | #[cfg(feature = "nativetls")] 29 | pub use native::{TlsIdentityInner, make_client_config, make_server_config}; 30 | 31 | #[cfg(feature = "nativetls")] 32 | pub use tokio_native_tls::TlsStream; 33 | #[cfg(feature = "__rustls")] 34 | pub use tokio_rustls::TlsStream; 35 | 36 | pub use maybe_tls::MaybeTlsStream; 37 | 38 | /// A hot-swappable container for a TLS key and 
certificate. 39 | #[allow(clippy::module_name_repetitions)] 40 | pub type TlsIdentity = Arc>; 41 | 42 | pub const TLS_ALPN: [&str; 2] = ["h2", "http/1.1"]; 43 | 44 | /// Error type for TLS configuration 45 | #[derive(Error, Debug)] 46 | pub enum Error { 47 | #[error("Error reading certificate, key, or CA: {0}")] 48 | ReadCert(std::io::Error), 49 | #[error("Error making a TCP connection: {0}")] 50 | TcpConnect(std::io::Error), 51 | #[error("Rustls error: {0}")] 52 | #[cfg(feature = "__rustls")] 53 | Rustls(#[from] ::rustls::Error), 54 | #[cfg(feature = "__rustls")] 55 | #[error("Unable to determine server name for SNI")] 56 | DnsName(#[from] InvalidDnsNameError), 57 | #[error("Verifier error: {0}")] 58 | #[cfg(feature = "__rustls")] 59 | Verifier(#[from] ::rustls::client::VerifierBuilderError), 60 | #[error("Failed to parse certificates: {0}")] 61 | #[cfg(feature = "nativetls")] 62 | CertParse(#[from] tokio_native_tls::native_tls::Error), 63 | #[error("Unsupported private key type")] 64 | #[cfg(feature = "__rustls")] 65 | PrivateKeyNotSupported, 66 | } 67 | 68 | #[cfg(feature = "client")] 69 | pub async fn tls_connect( 70 | host: &str, 71 | port: u16, 72 | domain: &str, 73 | tls_cert: Option<&str>, 74 | tls_key: Option<&str>, 75 | tls_ca: Option<&str>, 76 | tls_insecure: bool, 77 | ) -> Result, Error> { 78 | let config = 79 | make_client_config(tls_cert, tls_key, tls_ca, tls_insecure, Some(&TLS_ALPN)).await?; 80 | let tcp_stream = tokio::net::TcpStream::connect((host, port)) 81 | .await 82 | .map_err(Error::TcpConnect)?; 83 | #[cfg(feature = "nativetls")] 84 | let tls_stream = { 85 | let connector = tokio_native_tls::TlsConnector::from(config); 86 | connector.connect(domain, tcp_stream).await? 
87 | }; 88 | #[cfg(feature = "__rustls")] 89 | let tls_stream = { 90 | let connector: tokio_rustls::TlsConnector = Arc::new(config).into(); 91 | let server_name = ::rustls::pki_types::ServerName::try_from(domain.to_string())?; 92 | let client_st = connector 93 | .connect(server_name, tcp_stream) 94 | .await 95 | .map_err(Error::TcpConnect)?; 96 | TlsStream::Client(client_st) 97 | }; 98 | Ok(tls_stream) 99 | } 100 | 101 | pub async fn make_tls_identity( 102 | cert_path: &str, 103 | key_path: &str, 104 | client_ca_path: Option<&str>, 105 | ) -> Result { 106 | let identity = make_server_config(cert_path, key_path, client_ca_path).await?; 107 | Ok(Arc::new(ArcSwap::from_pointee(identity))) 108 | } 109 | 110 | #[cfg(feature = "acme")] 111 | pub async fn make_tls_identity_from_rcgen_pem( 112 | certs: String, 113 | keypair: rcgen::KeyPair, 114 | client_ca_path: Option<&str>, 115 | ) -> Result { 116 | let identity = make_server_config_from_rcgen_pem(certs, keypair, client_ca_path).await?; 117 | Ok(Arc::new(ArcSwap::from_pointee(identity))) 118 | } 119 | 120 | #[cfg(unix)] 121 | pub async fn reload_tls_identity( 122 | identity: &TlsIdentity, 123 | cert_path: &str, 124 | key_path: &str, 125 | client_ca_path: Option<&str>, 126 | ) -> Result<(), Error> { 127 | let new = make_server_config(cert_path, key_path, client_ca_path).await?; 128 | identity.store(Arc::new(new)); 129 | Ok(()) 130 | } 131 | 132 | #[cfg(feature = "acme")] 133 | pub async fn reload_tls_identity_from_rcgen_pem( 134 | identity: &TlsIdentity, 135 | certs: String, 136 | keypair: rcgen::KeyPair, 137 | client_ca_path: Option<&str>, 138 | ) -> Result<(), Error> { 139 | let new = make_server_config_from_rcgen_pem(certs, keypair, client_ca_path).await?; 140 | identity.store(Arc::new(new)); 141 | Ok(()) 142 | } 143 | -------------------------------------------------------------------------------- /src/tls/native.rs: -------------------------------------------------------------------------------- 1 | //! 
TLS-related code for `native-tls`. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0 OR GPL-3.0-or-later 4 | 5 | use super::Error; 6 | use tokio_native_tls::native_tls::{Certificate, Identity, TlsAcceptor, TlsConnector}; 7 | 8 | /// Type alias for the inner TLS identity type. 9 | pub type TlsIdentityInner = tokio_native_tls::TlsAcceptor; 10 | 11 | /// Type alias for the Hyper HTTPS connector. 12 | #[cfg(feature = "server")] 13 | pub type HyperConnector = 14 | hyper_tls::HttpsConnector; 15 | 16 | pub async fn make_server_config( 17 | cert_path: &str, 18 | key_path: &str, 19 | client_ca_path: Option<&str>, 20 | ) -> Result { 21 | let identity = read_key_cert(key_path, cert_path).await?; 22 | make_server_config_from_mem(identity, client_ca_path) 23 | } 24 | 25 | #[cfg(feature = "acme")] 26 | #[allow(clippy::unused_async)] 27 | pub async fn make_server_config_from_rcgen_pem( 28 | certs: String, 29 | keypair: rcgen::KeyPair, 30 | client_ca_path: Option<&str>, 31 | ) -> Result { 32 | let identity = Identity::from_pkcs8(certs.as_bytes(), keypair.serialize_pem().as_bytes())?; 33 | make_server_config_from_mem(identity, client_ca_path) 34 | } 35 | 36 | fn make_server_config_from_mem( 37 | identity: Identity, 38 | _client_ca_path: Option<&str>, 39 | ) -> Result { 40 | // TODO: support client CA (sfackler/rust-native-tls#161) 41 | let raw_acceptor = TlsAcceptor::builder(identity).build()?; 42 | Ok(raw_acceptor.into()) 43 | } 44 | 45 | pub async fn make_client_config( 46 | cert_path: Option<&str>, 47 | key_path: Option<&str>, 48 | ca_path: Option<&str>, 49 | tls_skip_verify: bool, 50 | tls_alpn: Option<&[&str]>, 51 | ) -> Result { 52 | let mut tls_config_builder = TlsConnector::builder(); 53 | tls_config_builder 54 | .danger_accept_invalid_certs(tls_skip_verify) 55 | .danger_accept_invalid_hostnames(tls_skip_verify); 56 | if let Some(tls_alpn) = tls_alpn { 57 | tls_config_builder.request_alpns(tls_alpn); 58 | } 59 | if let Some(ca_path) = ca_path { 60 | let ca = 
tokio::fs::read(ca_path).await.map_err(Error::ReadCert)?; 61 | tls_config_builder.add_root_certificate(Certificate::from_pem(&ca)?); 62 | } 63 | if let Some(cert_path) = cert_path { 64 | let identity = read_key_cert(key_path.unwrap_or(cert_path), cert_path).await?; 65 | tls_config_builder.identity(identity); 66 | } 67 | Ok(tls_config_builder.build()?) 68 | } 69 | 70 | async fn read_key_cert(key_path: &str, cert_path: &str) -> Result { 71 | let key = tokio::fs::read(key_path).await.map_err(Error::ReadCert)?; 72 | let cert = tokio::fs::read(cert_path).await.map_err(Error::ReadCert)?; 73 | Ok(Identity::from_pkcs8(&cert, &key)?) 74 | } 75 | 76 | #[cfg(feature = "server")] 77 | #[allow(clippy::unnecessary_wraps)] 78 | pub fn make_hyper_connector() -> std::io::Result { 79 | Ok(HyperConnector::new()) 80 | } 81 | 82 | // `native_tls` on macOS and Windows doesn't support reading Ed25519 nor ECDSA-based certificates, but `rcgen` doesn't support generating RSA keys. 83 | #[cfg(test)] 84 | #[cfg(not(any(target_os = "macos", target_os = "windows")))] 85 | mod tests { 86 | use super::*; 87 | use rcgen::CertificateParams; 88 | use tempfile::tempdir; 89 | 90 | #[tokio::test] 91 | async fn test_read_key_cert() { 92 | crate::tests::setup_logging(); 93 | let tmpdir = tempdir().unwrap(); 94 | let key_path = tmpdir.path().join("key.pem"); 95 | let cert_path = tmpdir.path().join("cert.pem"); 96 | let cert_params = CertificateParams::new(vec!["example.com".into()]).unwrap(); 97 | let keypair = rcgen::KeyPair::generate_for(&rcgen::PKCS_ECDSA_P384_SHA384).unwrap(); 98 | let custom_crt = cert_params.self_signed(&keypair).unwrap(); 99 | let crt = custom_crt.pem(); 100 | let crt_key = keypair.serialize_pem(); 101 | tokio::fs::write(&cert_path, crt).await.unwrap(); 102 | tokio::fs::write(&key_path, crt_key).await.unwrap(); 103 | read_key_cert(key_path.to_str().unwrap(), cert_path.to_str().unwrap()) 104 | .await 105 | .unwrap(); 106 | } 107 | #[tokio::test] 108 | #[cfg(feature = "acme")] 109 | 
async fn test_make_server_config_from_rcgen_pem() { 110 | crate::tests::setup_logging(); 111 | let cert_params = CertificateParams::new(vec!["example.com".into()]).unwrap(); 112 | let keypair = rcgen::KeyPair::generate_for(&rcgen::PKCS_ECDSA_P384_SHA384).unwrap(); 113 | let custom_crt = cert_params.self_signed(&keypair).unwrap(); 114 | let crt = custom_crt.pem(); 115 | 116 | let result = make_server_config_from_rcgen_pem(crt, keypair, None).await; 117 | 118 | assert!(result.is_ok()); 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /tools/http01_helper: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | token="$(echo "$2"|cut -d. -f1)" 4 | 5 | if [ "$1" = "create" ]; then 6 | mkdir -p "$WEBROOT/.well-known/acme-challenge" 7 | echo "$2" > "$WEBROOT/.well-known/acme-challenge/$token" 8 | elif [ "$1" = "remove" ]; then 9 | rm "$WEBROOT/.well-known/acme-challenge/$token" 10 | fi 11 | -------------------------------------------------------------------------------- /tools/http01_socat_helper: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | port=80 4 | content="$2" 5 | len="${#content}" 6 | 7 | if [ "$1" = "create" ]; then 8 | socat TCP-LISTEN:$port,crlf,reuseaddr,fork SYSTEM:"sleep 1; echo 'HTTP/1.0 200 OK'; echo 'Content-Length: $len'; echo; printf '%s' '$content'" & 9 | pid=$! 
10 | echo "$pid" > "/tmp/http01_socat_helper$content.pid6" 11 | elif [ "$1" = "remove" ]; then 12 | if [ -f "/tmp/http01_socat_helper$content.pid6" ]; then 13 | pid=$(cat "/tmp/http01_socat_helper$content.pid6") 14 | rm -f "/tmp/http01_socat_helper$content.pid6" 15 | if [ -n "$pid" ]; then 16 | kill "$pid" 2>/dev/null || true 17 | fi 18 | fi 19 | fi 20 | -------------------------------------------------------------------------------- /tools/penguin-v7.lua: -------------------------------------------------------------------------------- 1 | -- Wireshark dissector for the Penguin v7 protocol 2 | penguinv7_proto = Proto("penguin-v7", "Penguin v7 Protocol") 3 | 4 | local opcodes = { 5 | [0] = "Connect", 6 | [1] = "Acknowledge", 7 | [2] = "Reset", 8 | [3] = "Finish", 9 | [4] = "Push", 10 | [5] = "Bind", 11 | [6] = "Datagram" 12 | } 13 | 14 | local bind_types = { 15 | [1] = "TCP", 16 | [3] = "UDP" 17 | } 18 | 19 | local f_opcode = ProtoField.uint8("penguin-v7.opcode", "Operation Code", base.DEC, opcodes) 20 | local f_flow_id = ProtoField.uint32("penguin-v7.flow_id", "Flow ID", base.HEX) 21 | local f_rwnd = ProtoField.uint64("penguin-v7.rwnd", "Buffer Size") 22 | local f_ack = ProtoField.uint64("penguin-v7.ack", "Acknowledge Amount") 23 | local f_target_port = ProtoField.uint16("penguin-v7.target_port", "Target Port") 24 | local f_target_host = ProtoField.string("penguin-v7.target_host", "Target Host") 25 | local f_host_len = ProtoField.uint8("penguin-v7.host_len", "Host Length") 26 | local f_bind_type = ProtoField.uint8("penguin-v7.bind_type", "Bind Type", base.DEC, bind_types) 27 | local f_payload = ProtoField.bytes("penguin-v7.payload", "Payload") 28 | 29 | penguinv7_proto.fields = { 30 | f_opcode, 31 | f_flow_id, 32 | f_rwnd, 33 | f_ack, 34 | f_target_port, 35 | f_target_host, 36 | f_host_len, 37 | f_bind_type, 38 | f_payload 39 | } 40 | 41 | function penguinv7_proto.dissector(buffer, pinfo, tree) 42 | pinfo.cols.protocol = "Penguin v7" 43 | local subtree = 
tree:add(penguinv7_proto, buffer(), "Penguin v7 Protocol") 44 | local first_byte = buffer(0, 1):uint() 45 | if (first_byte >> 4) ~= 7 then 46 | -- Not penguin-v7 47 | return 0 48 | end 49 | local opcode = first_byte & 0x0F; 50 | subtree:add(f_opcode, opcode) 51 | subtree:add(f_flow_id, buffer(1, 4)) 52 | if opcode == 0 then 53 | -- Connect 54 | subtree:add(f_rwnd, buffer(5, 4)) 55 | subtree:add(f_target_port, buffer(9, 2)) 56 | subtree:add(f_target_host, buffer(11)) 57 | elseif opcode == 1 then 58 | -- Acknowledge 59 | subtree:add(f_ack, buffer(5, 4)) 60 | elseif opcode == 2 then 61 | -- Reset 62 | elseif opcode == 3 then 63 | -- Finish 64 | elseif opcode == 4 then 65 | -- Push 66 | subtree:add(f_payload, buffer(5)) 67 | elseif opcode == 5 then 68 | -- Bind 69 | local bind_type = buffer(5, 1):uint() 70 | if bind_types[bind_type] == nil then 71 | return 0 72 | end 73 | subtree:add(f_bind_type, bind_type) 74 | subtree:add(f_target_port, buffer(6, 2)) 75 | subtree:add(f_target_host, buffer(8)) 76 | elseif opcode == 6 then 77 | -- Datagram 78 | local host_len = buffer(5, 1):uint() 79 | subtree:add(f_host_len, host_len) 80 | subtree:add(f_target_port, buffer(6, 2)) 81 | subtree:add(f_target_host, buffer(8, host_len)) 82 | -- Remaining is data 83 | subtree:add(f_payload, buffer(8 + host_len)) 84 | else 85 | -- Not penguin-v7 86 | return 0 87 | end 88 | end 89 | local ws_dissector_table = DissectorTable.get("ws.protocol") 90 | ws_dissector_table:add("penguin-v7", penguinv7_proto) 91 | --------------------------------------------------------------------------------