├── .cbfmt.toml ├── .cspell └── custom-dictionary-workspace.txt ├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ ├── dependabot.yml │ └── release.yml ├── .gitignore ├── .hadolint.yaml ├── .markdownlint.yaml ├── .rustfmt.toml ├── .shellcheckrc ├── .trunk ├── .gitignore └── trunk.yaml ├── .vscode └── settings.json ├── CHANGELOG.md ├── Dockerfile ├── LICENCE ├── README.md ├── clippy.toml ├── core ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Cross.toml ├── about.hbs ├── about.toml ├── cross │ └── Dockerfile-arm64 └── src │ ├── bin │ ├── dandelion.rs │ └── example.rn │ ├── config │ ├── engine │ │ ├── connect.rs │ │ ├── geoip.rs │ │ ├── iplist.rs │ │ ├── mod.rs │ │ └── resolver.rs │ ├── mod.rs │ └── rune.rs │ ├── core │ ├── acceptor │ │ ├── http.rs │ │ ├── mod.rs │ │ └── socks5.rs │ ├── connector │ │ ├── block.rs │ │ ├── http.rs │ │ ├── mod.rs │ │ ├── quic.rs │ │ ├── simplex.rs │ │ ├── socks5.rs │ │ ├── speed.rs │ │ ├── tcp.rs │ │ └── tls.rs │ ├── endpoint.rs │ ├── io.rs │ ├── mod.rs │ ├── quic │ │ ├── client.rs │ │ └── mod.rs │ ├── resolver │ │ ├── hickory.rs │ │ ├── mod.rs │ │ └── system.rs │ └── simplex │ │ ├── client.rs │ │ ├── io.rs │ │ ├── mod.rs │ │ └── server.rs │ └── lib.rs └── snapcraft.yaml /.cbfmt.toml: -------------------------------------------------------------------------------- 1 | [languages] 2 | rust = ["rustfmt"] 3 | -------------------------------------------------------------------------------- /.cspell/custom-dictionary-workspace.txt: -------------------------------------------------------------------------------- 1 | # Custom Dictionary Words 2 | flate 3 | GEOIP 4 | Itertools 5 | Keepalive 6 | maxmind 7 | maxminddb 8 | mmap 9 | mmdb 10 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | core/target 2 | app 3 | 
-------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/core" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "bundler" 13 | directory: "/app" 14 | schedule: 15 | interval: "weekly" 16 | - package-ecosystem: "github-actions" 17 | directory: "/" 18 | schedule: 19 | interval: "weekly" 20 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: [push] 3 | 4 | jobs: 5 | check_rust: 6 | strategy: 7 | matrix: 8 | os: [macos-latest, windows-latest, ubuntu-latest] 9 | runs-on: ${{ matrix.os }} 10 | steps: 11 | - uses: actions/checkout@v4 12 | # Rustls on windows depends on aws-lc, which depends on nasm. 
13 | - uses: ilammy/setup-nasm@v1 14 | if: runner.os == 'Windows' 15 | - uses: dtolnay/rust-toolchain@stable 16 | with: 17 | components: clippy, rustfmt 18 | - uses: Swatinem/rust-cache@v2 19 | with: 20 | workspaces: | 21 | core 22 | - name: cargo fmt 23 | run: cargo fmt --all --manifest-path core/Cargo.toml -- --check 24 | - name: cargo clippy 25 | run: cargo clippy --all-targets --all-features --manifest-path core/Cargo.toml -- -D warnings 26 | - name: cargo test 27 | if: ${{ env.MAXMINDDB_LICENSE != 0 }} 28 | run: cargo test --manifest-path core/Cargo.toml -- --include-ignored 29 | env: 30 | MAXMINDDB_LICENSE: ${{ secrets.MAXMINDDB_LICENSE }} 31 | - name: cargo test (no secrets) 32 | if: ${{ env.MAXMINDDB_LICENSE == 0 }} 33 | run: cargo test --manifest-path core/Cargo.toml -- --include-ignored 34 | env: 35 | MAXMINDDB_LICENSE: ${{ secrets.MAXMINDDB_LICENSE }} 36 | SKIP_MAXMINDDB_TESTS: 1 37 | 38 | docker: 39 | runs-on: ubuntu-latest 40 | needs: [check_rust] 41 | if: ${{ github.ref == 'refs/heads/main' }} 42 | steps: 43 | - name: Set up QEMU 44 | uses: docker/setup-qemu-action@v3 45 | - name: Set up Docker Buildx 46 | uses: docker/setup-buildx-action@v3 47 | - name: Login to GitHub Container Registry 48 | uses: docker/login-action@v3 49 | with: 50 | registry: ghcr.io 51 | username: ${{ github.repository_owner }} 52 | password: ${{ secrets.GITHUB_TOKEN }} 53 | - name: Build and push 54 | id: docker_build 55 | uses: docker/build-push-action@v6 56 | with: 57 | push: true 58 | # We don't build docker image for other archs because it's toooooo slow. 59 | # You can build it yourself or use snap to install it. 
60 | platforms: "linux/amd64" 61 | tags: ghcr.io/zhuhaow/dandelion:latest 62 | cache-from: type=registry,ref=ghcr.io/zhuhaow/dandelion:buildcache 63 | cache-to: type=registry,ref=ghcr.io/zhuhaow/dandelion:buildcache,mode=max 64 | -------------------------------------------------------------------------------- /.github/workflows/dependabot.yml: -------------------------------------------------------------------------------- 1 | name: Dependabot auto-merge 2 | on: pull_request 3 | 4 | permissions: 5 | contents: write 6 | pull-requests: write 7 | 8 | jobs: 9 | dependabot: 10 | runs-on: ubuntu-latest 11 | if: ${{ github.event.pull_request.user.login == 'dependabot[bot]' }} 12 | steps: 13 | - name: Dependabot metadata 14 | id: dependabot-metadata 15 | uses: dependabot/fetch-metadata@v2 16 | - name: Enable auto-merge for Dependabot PRs 17 | if: ${{ steps.metadata.outputs.update-type == 'version-update:semver-patch' || steps.metadata.outputs.update-type == 'version-update:semver-minor'}} 18 | run: gh pr merge --auto --merge "$PR_URL" 19 | env: 20 | PR_URL: ${{github.event.pull_request.html_url}} 21 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 22 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - "*" 6 | 7 | jobs: 8 | create_release: 9 | name: Create a new Github release 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: taiki-e/create-gh-release-action@v1 14 | with: 15 | changelog: CHANGELOG.md 16 | branch: main 17 | env: 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | 20 | release_app: 21 | name: Release GUI app 22 | runs-on: macos-11 23 | needs: [create_release] 24 | steps: 25 | - uses: actions/checkout@v4 26 | - uses: actions-rs/toolchain@v1 27 | with: 28 | toolchain: stable 29 | override: true 30 | target: aarch64-apple-darwin 31 | - uses: 
Swatinem/rust-cache@v1 32 | with: 33 | working-directory: core/ 34 | - uses: ruby/setup-ruby@v1 35 | with: 36 | ruby-version: "3.0" 37 | bundler-cache: true 38 | working-directory: app 39 | - uses: maxim-lobanov/setup-xcode@v1 40 | with: 41 | xcode-version: latest-stable 42 | - uses: webfactory/ssh-agent@v0.9.0 43 | with: 44 | ssh-private-key: ${{ secrets.MATCH_DEPLOY_KEY }} 45 | - run: bundler exec fastlane ci_release 46 | env: 47 | MATCH_READONLY: true 48 | MATCH_PASSWORD: ${{ secrets.MATCH_PASSWORD }} 49 | FASTLANE_APPLE_APPLICATION_SPECIFIC_PASSWORD: ${{ secrets.FASTLANE_APPLE_APPLICATION_SPECIFIC_PASSWORD }} 50 | working-directory: app 51 | - name: Upload assets 52 | run: | 53 | gh release upload "${{ github.ref_name }}" app/Specht2.app.zip --clobber 54 | gh release upload "${{ github.ref_name }}" app/Specht2.app.dSYM.zip --clobber 55 | env: 56 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 57 | - name: update version information for sparkle 58 | run: app/bin/publish_appcast.sh 59 | env: 60 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 61 | SPARKLE_KEY: ${{ secrets.SPARKLE_KEY }} 62 | 63 | release_docker: 64 | name: Release Docker image 65 | runs-on: ubuntu-latest 66 | needs: [create_release] 67 | steps: 68 | - name: Set up QEMU 69 | uses: docker/setup-qemu-action@v3 70 | - name: Set up Docker Buildx 71 | uses: docker/setup-buildx-action@v3 72 | - name: Login to GitHub Container Registry 73 | uses: docker/login-action@v3 74 | with: 75 | registry: ghcr.io 76 | username: ${{ github.repository_owner }} 77 | password: ${{ secrets.GITHUB_TOKEN }} 78 | - name: Build and push 79 | id: docker_build 80 | uses: docker/build-push-action@v6 81 | with: 82 | push: true 83 | # We don't build docker image for other archs because it's toooooo slow. 84 | # You can build it yourself or use snap to install it. 
85 | platforms: "linux/amd64" 86 | tags: ghcr.io/zhuhaow/specht2:${{ github.ref_name }} 87 | cache-from: type=registry,ref=ghcr.io/zhuhaow/specht2:buildcache 88 | cache-to: type=registry,ref=ghcr.io/zhuhaow/specht2:buildcache,mode=max 89 | 90 | release_bin: 91 | name: Release CLI binary 92 | needs: [create_release] 93 | strategy: 94 | matrix: 95 | include: 96 | - target: x86_64-unknown-linux-gnu 97 | - target: aarch64-unknown-linux-gnu 98 | - target: x86_64-apple-darwin 99 | os: macos-11 100 | - target: aarch64-apple-darwin 101 | os: macos-11 102 | - target: x86_64-pc-windows-msvc 103 | os: windows-latest 104 | runs-on: ${{ matrix.os || 'ubuntu-latest' }} 105 | steps: 106 | - uses: actions/checkout@v4 107 | - run: | 108 | cp -r core .. 109 | rm -rf ./* 110 | cp -r ../core/* . 111 | shell: bash 112 | - if: ${{ matrix.target != 'aarch64-apple-darwin' }} 113 | uses: actions-rs/toolchain@v1 114 | with: 115 | toolchain: stable 116 | override: true 117 | - if: ${{ matrix.target == 'aarch64-apple-darwin' }} 118 | uses: actions-rs/toolchain@v1 119 | with: 120 | toolchain: stable 121 | override: true 122 | target: aarch64-apple-darwin 123 | - uses: Swatinem/rust-cache@v1 124 | - if: ${{ matrix.target == 'aarch64-unknown-linux-gnu' }} 125 | run: | 126 | docker build -t arm64_with_openssl -f cross/Dockerfile-arm64 . 
127 | - uses: taiki-e/upload-rust-binary-action@v1 128 | with: 129 | bin: specht2 130 | tar: unix 131 | zip: windows 132 | target: ${{ matrix.target }} 133 | archive: $bin-$target 134 | env: 135 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 136 | CARGO_PROFILE_RELEASE_LTO: true 137 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.toptal.com/developers/gitignore/api/osx 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=osx 4 | 5 | ### OSX ### 6 | # General 7 | .DS_Store 8 | .AppleDouble 9 | .LSOverride 10 | 11 | # Icon must end with two \r 12 | Icon 13 | 14 | 15 | # Thumbnails 16 | ._* 17 | 18 | # Files that might appear in the root of a volume 19 | .DocumentRevisions-V100 20 | .fseventsd 21 | .Spotlight-V100 22 | .TemporaryItems 23 | .Trashes 24 | .VolumeIcon.icns 25 | .com.apple.timemachine.donotpresent 26 | 27 | # Directories potentially created on remote AFP share 28 | .AppleDB 29 | .AppleDesktop 30 | Network Trash Folder 31 | Temporary Items 32 | .apdisk 33 | 34 | # End of https://www.toptal.com/developers/gitignore/api/osx 35 | 36 | .test_config 37 | -------------------------------------------------------------------------------- /.hadolint.yaml: -------------------------------------------------------------------------------- 1 | # Following source doesn't work in most setups 2 | ignored: 3 | - SC1090 4 | - SC1091 5 | # We don't really care about reproducibility that much 6 | - DL3008 7 | -------------------------------------------------------------------------------- /.markdownlint.yaml: -------------------------------------------------------------------------------- 1 | # Autoformatter friendly markdownlint config (all formatting rules disabled) 2 | default: true 3 | blank_lines: false 4 | bullet: false 5 | html: false 6 | indentation: false 7 | line_length: false 8 | spaces: false 9 
| url: false 10 | whitespace: false 11 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | unstable_features = true 3 | imports_granularity = "Crate" 4 | -------------------------------------------------------------------------------- /.shellcheckrc: -------------------------------------------------------------------------------- 1 | enable=all 2 | source-path=SCRIPTDIR 3 | disable=SC2154 4 | 5 | # If you're having issues with shellcheck following source, disable the errors via: 6 | # disable=SC1090 7 | # disable=SC1091 8 | -------------------------------------------------------------------------------- /.trunk/.gitignore: -------------------------------------------------------------------------------- 1 | *out 2 | *logs 3 | plugins 4 | user_trunk.yaml 5 | -------------------------------------------------------------------------------- /.trunk/trunk.yaml: -------------------------------------------------------------------------------- 1 | version: 0.1 2 | cli: 3 | version: 0.16.1-beta 4 | lint: 5 | enabled: 6 | - actionlint@1.6.15 7 | - clippy@1.58.1 8 | - git-diff-check@SYSTEM 9 | - gitleaks@8.10.3 10 | - hadolint@2.10.0 11 | - markdownlint@0.32.1 12 | - prettier@2.7.1 13 | - rustfmt@1.58.1 14 | - shellcheck@0.8.0 15 | - shfmt@3.5.0 16 | - taplo@release-taplo-cli-0.6.8 17 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "cSpell.customDictionaries": { 3 | "custom-dictionary-workspace": { 4 | "name": "custom-dictionary-workspace", 5 | "path": "${workspaceFolder:dandelion}/.cspell/custom-dictionary-workspace.txt", 6 | "addWords": true, 7 | "scope": "workspace" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- 
/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1-bookworm as base 2 | RUN cargo install cargo-chef 3 | 4 | FROM base as planner 5 | WORKDIR /app 6 | COPY ./core ./ 7 | RUN cargo chef prepare 8 | 9 | FROM base as builder 10 | WORKDIR /app 11 | COPY --from=planner /app/recipe.json recipe.json 12 | RUN cargo chef cook --release 13 | COPY ./core ./ 14 | RUN cargo build --release && cargo install --path . --locked 15 | 16 | FROM debian:buster-slim 17 | RUN apt-get update && apt-get install -y --no-install-recommends \ 18 | ca-certificates \ 19 | libssl1.1 \ 20 | && rm -rf /var/lib/apt/lists/* 21 | COPY --from=builder /usr/local/cargo/bin/dandelion /usr/local/bin/dandelion 22 | ENTRYPOINT ["/usr/local/bin/dandelion"] 23 | CMD ["/config.rn"] 24 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Zhuhao Wang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # dandelion 2 | 3 | dandelion is a proxy forwarding data by fully programmable rules with [rune](https://github.com/rune-rs/rune). 4 | 5 | -------------------------------------------------------------------------------- /clippy.toml: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /core/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.toptal.com/developers/gitignore/api/rust 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=rust 4 | 5 | ### Rust ### 6 | # Generated by Cargo 7 | # will have compiled files and executables 8 | debug/ 9 | target/ 10 | 11 | # These are backup files generated by rustfmt 12 | **/*.rs.bk 13 | 14 | # MSVC Windows builds of rustc generate these, which store debugging information 15 | *.pdb 16 | 17 | # End of https://www.toptal.com/developers/gitignore/api/rust 18 | -------------------------------------------------------------------------------- /core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dandelion" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | authors = 
["Zhuhao Wang "] 7 | 8 | [dependencies] 9 | tokio = { version = "1.45.1", features = ["io-util", "net", "macros", "rt"] } 10 | async-trait = "0.1.88" 11 | tokio-tungstenite = "0.26.2" 12 | futures = "0.3.31" 13 | http = "1.3.1" 14 | pin-project = "1.1.10" 15 | chrono = "0.4.41" 16 | hyper-tungstenite = "0.17.0" 17 | hyper = { version = "1.6.0", features = ["http1", "server", "client"] } 18 | bytes = "1.10.1" 19 | tungstenite = "0.26.2" 20 | serde = { version = "1.0.219", features = ["derive"] } 21 | anyhow = { version = "1.0.98", features = ["backtrace"] } 22 | tokio-native-tls = "0.3.1" 23 | lazy_static = "1.5.0" 24 | tempfile = "3.20.0" 25 | dns-lookup = "2.0.4" 26 | itertools = "0.14.0" 27 | libc = "0.2.172" 28 | rand = "0.9.1" 29 | scopeguard = "1.2.0" 30 | hickory-proto = "0.25.2" 31 | tokio-util = { version = "0.7.15", features = ["codec"] } 32 | byteorder = "1.5.0" 33 | cfg-if = "1.0.0" 34 | tracing = { version = "0.1.41", features = ["log"] } 35 | socket2 = { version = "0.5.10", features = ["all"] } 36 | rustc-hash = "2.1.1" 37 | hickory-resolver = "0.25.2" 38 | quinn = { version = "0.11.8", features = ["ring", "runtime-tokio", "platform-verifier"] } 39 | rustls = "0.23.27" 40 | httparse = "1.10.1" 41 | auto_impl = "1.3.0" 42 | http-body-util = "0.1.3" 43 | hyper-util = { version = "0.1.13" } 44 | rustls-platform-verifier = "0.5.3" 45 | rune = "0.14.0" 46 | ipnetwork = "0.21.1" 47 | flate2 = "1.1.1" 48 | maxminddb = { version = "0.26.0", features = ["mmap"] } 49 | tar = "0.4.44" 50 | reqwest = { version = "0.12.18" } 51 | log = "0.4.27" 52 | structopt = "0.3.26" 53 | flexi_logger = "0.30.2" 54 | fdlimit = "0.3.0" 55 | cached = "0.55.1" 56 | 57 | [dev-dependencies] 58 | env_logger = "0.11.8" 59 | rstest = "0.25.0" 60 | test-log = "0.2.17" 61 | -------------------------------------------------------------------------------- /core/Cross.toml: -------------------------------------------------------------------------------- 1 | [target.aarch64-unknown-linux-gnu] 
2 | image = "arm64_with_openssl" 3 | -------------------------------------------------------------------------------- /core/about.hbs: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 36 | 37 | 38 | 39 |
40 |
41 |

Third Party Licenses

42 |

This page lists the licenses of the projects used in Specht2.

43 |
44 | 45 |

Overview of licenses:

46 | 51 | 52 |

All license text:

53 | 67 |
68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /core/about.toml: -------------------------------------------------------------------------------- 1 | accepted = ["MIT", "Apache-2.0", "BSD-3-Clause", "BSD-2-Clause", "ISC"] 2 | -------------------------------------------------------------------------------- /core/cross/Dockerfile-arm64: -------------------------------------------------------------------------------- 1 | # This is only used for cross build for arm64 2 | 3 | FROM rustembedded/cross:aarch64-unknown-linux-gnu-0.2.1 4 | 5 | RUN dpkg --add-architecture arm64 && \ 6 | apt-get update && \ 7 | apt-get install -y --no-install-recommends \ 8 | libssl-dev:arm64 \ 9 | && rm -rf /var/lib/apt/lists/* 10 | 11 | ENV PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig 12 | -------------------------------------------------------------------------------- /core/src/bin/dandelion.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use dandelion::{config::Engine, Result}; 3 | use std::{ 4 | env, 5 | fs::read_to_string, 6 | path::{Path, PathBuf}, 7 | }; 8 | use structopt::StructOpt; 9 | 10 | #[derive(Debug, StructOpt)] 11 | #[structopt(name = "dandelion", about = "CLI version of the dandelion client")] 12 | struct Opt { 13 | #[structopt(parse(from_os_str))] 14 | input: Option, 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<()> { 19 | flexi_logger::Logger::try_with_env_or_str("warn,dandelion_core=info,dandelion_config=info") 20 | .unwrap() 21 | .start() 22 | .unwrap(); 23 | 24 | #[cfg(not(target_os = "windows"))] 25 | { 26 | use fdlimit::{raise_fd_limit, Outcome}; 27 | use tracing::{info, warn}; 28 | 29 | match raise_fd_limit() { 30 | Ok(Outcome::LimitRaised { to, from: _ }) => info!("Raised fd limit to {}", to), 31 | Ok(Outcome::Unsupported) => {}, 32 | Err(err) => warn!("Failed to raise fd limit due to {}, this may cause \"Too many files 
error\" when there are too many connections", err), 33 | } 34 | } 35 | 36 | rustls::crypto::aws_lc_rs::default_provider() 37 | .install_default() 38 | .map_err(|_| anyhow::anyhow!("Failed to install aws lc provider"))?; 39 | 40 | let opt: Opt = Opt::from_args(); 41 | 42 | fn load_config_from_env(env: &str, path: &str) -> Result { 43 | Ok(read_to_string(Path::new(&env::var(env)?).join(path))?) 44 | } 45 | 46 | let code = match opt.input { 47 | Some(path) => read_to_string(&path) 48 | .with_context(|| format!("Failed to load config file {}", path.to_string_lossy()))?, 49 | None => load_config_from_env("SNAP_COMMON", "./config.rn") 50 | .or_else(|_| load_config_from_env("HOME", "./.dandelion/config.rn")) 51 | .context( 52 | "Failed to load config from $SNAP_COMMON/config.rn or $HOME/.dandelion/config.rn", 53 | )?, 54 | }; 55 | 56 | let engine = Engine::load_config("config", code).await?; 57 | 58 | engine.run().await 59 | } 60 | -------------------------------------------------------------------------------- /core/src/bin/example.rn: -------------------------------------------------------------------------------- 1 | pub async fn config() { 2 | let config = Config::new(); 3 | 4 | config.add_http_acceptor("127.0.0.1:8123", "handler")?; 5 | config.add_socks5_acceptor("127.0.0.1:8124", "handler")?; 6 | 7 | config.add_system_resolver("system")?; 8 | 9 | Ok(config) 10 | } 11 | 12 | pub async fn handler(connector) { 13 | connector.new_tcp(connector.endpoint(), "system").await 14 | } 15 | -------------------------------------------------------------------------------- /core/src/config/engine/connect.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | core::{ 3 | connector::{ 4 | block::connect as block_connect, 5 | http::connect as http_connect, 6 | quic::{connect as quic_connect, create_quic_connection, QuicConnection}, 7 | simplex::connect as simplex_connect, 8 | socks5::connect as socks5_connect, 9 | tcp::connect as 
tcp_connect, 10 | tls::connect as tls_connect, 11 | }, 12 | endpoint::Endpoint, 13 | io::Io, 14 | simplex::Config, 15 | }, 16 | Result, 17 | }; 18 | use rune::{runtime::Ref, Any, Module, Value}; 19 | use std::{fmt::Debug, net::IpAddr, sync::Arc}; 20 | 21 | use crate::config::{engine::resolver::ResolverWrapper, rune::create_wrapper}; 22 | 23 | create_wrapper!(IoWrapper, Io, Box); 24 | create_wrapper!(QuicConnectionWrapper, Arc); 25 | 26 | #[derive(Debug, Any)] 27 | pub struct ConnectRequest { 28 | endpoint: Endpoint, 29 | } 30 | 31 | impl ConnectRequest { 32 | pub fn new(endpoint: Endpoint) -> Self { 33 | Self { endpoint } 34 | } 35 | } 36 | 37 | #[rune::function] 38 | pub async fn new_tcp(endpoint: Ref, resolver: ResolverWrapper) -> Result { 39 | Ok(tcp_connect(&endpoint.parse()?, resolver.into_inner()) 40 | .await? 41 | .into()) 42 | } 43 | 44 | #[rune::function] 45 | pub async fn new_quic_connection( 46 | server: Ref, 47 | resolver: ResolverWrapper, 48 | alpn: Value, 49 | ) -> Result { 50 | let alpn_vec: Vec = rune::from_value(alpn)?; 51 | 52 | Ok(Arc::new( 53 | create_quic_connection( 54 | server.parse()?, 55 | resolver.into_inner(), 56 | alpn_vec.into_iter().map(|x| x.into_bytes()).collect(), 57 | ) 58 | .await?, 59 | ) 60 | .into()) 61 | } 62 | 63 | #[rune::function] 64 | pub async fn new_quic(connection: QuicConnectionWrapper) -> Result { 65 | Ok(quic_connect(connection.inner()).await?.into()) 66 | } 67 | 68 | #[rune::function] 69 | pub async fn new_tls(endpoint: Ref, nexthop: IoWrapper) -> Result { 70 | Ok(tls_connect(&endpoint.parse()?, nexthop.0).await?.into()) 71 | } 72 | 73 | #[rune::function] 74 | pub async fn new_block(endpoint: Ref) -> Result { 75 | match block_connect(&endpoint.parse()?).await { 76 | Ok(_) => unreachable!(), 77 | Err(e) => Err(e), 78 | } 79 | } 80 | 81 | #[rune::function] 82 | pub async fn new_http(endpoint: Ref, nexthop: IoWrapper) -> Result { 83 | Ok(http_connect(&endpoint.parse()?, nexthop.0).await?.into()) 84 | } 85 | 86 | 
#[derive(Any)] 87 | #[rune(constructor)] 88 | pub struct SimplexConfig { 89 | pub host: String, 90 | pub path: String, 91 | pub header_name: String, 92 | pub header_value: String, 93 | } 94 | 95 | #[rune::function] 96 | pub async fn new_simplex( 97 | endpoint: Ref, 98 | config: SimplexConfig, 99 | nexthop: IoWrapper, 100 | ) -> Result { 101 | let config = Config::new( 102 | config.host, 103 | config.path, 104 | (config.header_name, config.header_value), 105 | ); 106 | 107 | Ok(simplex_connect(&endpoint.parse()?, &config, nexthop.0) 108 | .await? 109 | .into()) 110 | } 111 | 112 | #[rune::function] 113 | pub async fn new_socks5(endpoint: Ref, nexthop: IoWrapper) -> Result { 114 | Ok(socks5_connect(&endpoint.parse()?, nexthop.0).await?.into()) 115 | } 116 | 117 | impl ConnectRequest { 118 | #[rune::function] 119 | pub fn port(&self) -> u16 { 120 | self.endpoint.port() 121 | } 122 | 123 | #[rune::function] 124 | pub fn hostname(&self) -> String { 125 | self.endpoint.hostname() 126 | } 127 | 128 | #[rune::function] 129 | pub fn endpoint(&self) -> String { 130 | self.endpoint.to_string() 131 | } 132 | 133 | #[rune::function] 134 | pub fn hostname_is_ip(&self) -> bool { 135 | self.hostname_as_ip().is_some() 136 | } 137 | 138 | fn hostname_as_ip(&self) -> Option { 139 | match &self.endpoint { 140 | Endpoint::Addr(addr) => Some(addr.ip().to_string()), 141 | Endpoint::Domain(domain, _) => domain.parse::().ok().map(|ip| ip.to_string()), 142 | } 143 | } 144 | } 145 | 146 | impl ConnectRequest { 147 | pub fn module() -> Result { 148 | let mut module = Module::default(); 149 | 150 | module.ty::()?; 151 | module.ty::()?; 152 | module.ty::()?; 153 | 154 | module.function_meta(new_tcp)?; 155 | module.function_meta(new_tls)?; 156 | module.function_meta(new_block)?; 157 | module.function_meta(new_http)?; 158 | module.function_meta(new_simplex)?; 159 | module.function_meta(new_socks5)?; 160 | 161 | module.function_meta(new_quic_connection)?; 162 | module.function_meta(new_quic)?; 163 
| 164 | module.function_meta(Self::port)?; 165 | module.function_meta(Self::hostname)?; 166 | module.function_meta(Self::endpoint)?; 167 | module.function_meta(Self::hostname_is_ip)?; 168 | 169 | Ok(module) 170 | } 171 | } 172 | 173 | #[cfg(test)] 174 | mod tests { 175 | use std::{str::FromStr, sync::Arc}; 176 | 177 | use rstest::rstest; 178 | use rune::{ 179 | termcolor::{ColorChoice, StandardStream}, 180 | Context, Diagnostics, FromValue, Source, Sources, Vm, 181 | }; 182 | 183 | use super::*; 184 | 185 | fn get_vm(sources: &mut Sources) -> Result { 186 | let mut context = Context::with_default_modules()?; 187 | context.install(ConnectRequest::module()?)?; 188 | 189 | let mut diagnostics = Diagnostics::new(); 190 | let result = rune::prepare(sources) 191 | .with_context(&context) 192 | .with_diagnostics(&mut diagnostics) 193 | .build(); 194 | 195 | if !diagnostics.is_empty() { 196 | let mut writer = StandardStream::stderr(ColorChoice::Always); 197 | diagnostics.emit(&mut writer, sources)?; 198 | } 199 | 200 | Ok(Vm::new(Arc::new(context.runtime()?), Arc::new(result?))) 201 | } 202 | 203 | fn test_request(method_name: &str, endpoint: Endpoint) -> Result { 204 | let mut sources = Sources::new(); 205 | 206 | sources.insert(Source::new( 207 | "entry", 208 | format!( 209 | " 210 | pub fn main(request) {{ 211 | request.{}() 212 | }} 213 | ", 214 | method_name, 215 | ), 216 | )?)?; 217 | 218 | let mut vm = get_vm(&mut sources)?; 219 | 220 | let request = ConnectRequest::new(endpoint); 221 | 222 | let output = rune::from_value(vm.call(["main"], (request,))?)?; 223 | 224 | Ok(output) 225 | } 226 | 227 | #[rstest] 228 | #[case("127.0.0.1:80", 80)] 229 | #[case("[::1]:80", 80)] 230 | #[case("example.com:80", 80)] 231 | fn test_connect_request_port(#[case] endpoint: &str, #[case] port: u16) -> Result<()> { 232 | let request = Endpoint::from_str(endpoint)?; 233 | 234 | assert_eq!(test_request::("port", request)?, port); 235 | 236 | Ok(()) 237 | } 238 | 239 | #[rstest] 240 | 
#[case("127.0.0.1:80", "127.0.0.1")] 241 | #[case("[::1]:80", "::1")] 242 | #[case("example.com:80", "example.com")] 243 | fn test_connect_request_hostname(#[case] endpoint: &str, #[case] hostname: &str) -> Result<()> { 244 | let request = Endpoint::from_str(endpoint)?; 245 | 246 | assert_eq!(test_request::("hostname", request)?, hostname); 247 | 248 | Ok(()) 249 | } 250 | 251 | #[rstest] 252 | #[case("127.0.0.1:80", "127.0.0.1:80")] 253 | #[case("[::1]:80", "[::1]:80")] 254 | #[case("example.com:80", "example.com:80")] 255 | fn test_connect_request_endpoint( 256 | #[case] endpoint: &str, 257 | #[case] expect_endpoint: &str, 258 | ) -> Result<()> { 259 | let request = Endpoint::from_str(endpoint)?; 260 | 261 | assert_eq!( 262 | test_request::("endpoint", request)?, 263 | expect_endpoint 264 | ); 265 | 266 | Ok(()) 267 | } 268 | 269 | #[rstest] 270 | #[case("127.0.0.1:80", true)] 271 | #[case("[::1]:80", true)] 272 | #[case("example.com:80", false)] 273 | fn test_connect_request_host_is_ip(#[case] endpoint: &str, #[case] is_ip: bool) -> Result<()> { 274 | let request = Endpoint::from_str(endpoint)?; 275 | 276 | assert_eq!(test_request::("hostname_is_ip", request)?, is_ip); 277 | 278 | Ok(()) 279 | } 280 | } 281 | -------------------------------------------------------------------------------- /core/src/config/engine/geoip.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::engine::connect::IoWrapper, Result}; 2 | use cached::proc_macro::cached; 3 | use flate2::read::GzDecoder; 4 | use http_body_util::{BodyExt, Empty}; 5 | use hyper::{Method, Request}; 6 | use hyper_util::rt::TokioIo; 7 | use maxminddb::{geoip2::Country, Mmap, Reader}; 8 | use rune::{ 9 | runtime::{Function, Ref}, 10 | Any, 11 | }; 12 | use std::{ 13 | env, 14 | fs::{create_dir_all, read_dir}, 15 | net::IpAddr, 16 | sync::Arc, 17 | }; 18 | use tar::Archive; 19 | use tempfile::tempdir; 20 | use tracing::{debug, info}; 21 | 22 | #[derive(Any, 
Debug, Clone)] 23 | pub struct GeoIp { 24 | reader: Arc>, 25 | } 26 | 27 | #[cached(name = "GEOIP_FROM_ABSOLUTE_PATH", result = true)] 28 | fn from_absolute_path_impl(path: String) -> Result { 29 | let reader = Reader::open_mmap(path)?; 30 | Ok(GeoIp { 31 | reader: Arc::new(reader), 32 | }) 33 | } 34 | 35 | impl GeoIp { 36 | #[rune::function(path = Self::from_absolute_path)] 37 | pub fn from_absolute_path(path: Ref) -> Result { 38 | from_absolute_path_impl(path.to_owned()).map_err(|e| { 39 | e.context(format!( 40 | "Failed to load GeoIP database from {}", 41 | path.as_ref() 42 | )) 43 | }) 44 | } 45 | 46 | #[rune::function(path = Self::from_license)] 47 | pub async fn from_license( 48 | license: Ref, 49 | handler: Function, 50 | force_update: bool, 51 | ) -> Result { 52 | let temp_dir = env::temp_dir().join("dandelion/geoip"); 53 | let db_path = temp_dir.join("GeoLite2-Country.mmdb"); 54 | 55 | if !force_update { 56 | // first try to load the file 57 | if let Ok(reader) = Reader::open_mmap(&db_path) { 58 | debug!( 59 | "Found existing GeoList2 database from {}", 60 | db_path.to_str().unwrap() 61 | ); 62 | return Ok(Self { 63 | reader: Arc::new(reader), 64 | }); 65 | } 66 | } 67 | 68 | let dir = tempdir()?; 69 | 70 | info!( 71 | "Downloading GeoLite2 database from remote to temp folder {} ...", 72 | dir.path().to_str().unwrap() 73 | ); 74 | 75 | let io = handler 76 | .async_send_call::<(String,), IoWrapper>(("download.maxmind.com:443".to_owned(),)) 77 | .await 78 | .into_result()? 
79 | .into_inner(); 80 | 81 | let path = format!( 82 | "/app/geoip_download?edition_id=GeoLite2-Country&license_key={}&suffix=tar.gz", 83 | license.as_ref() 84 | ); 85 | 86 | let (mut request_sender, connection) = 87 | hyper::client::conn::http1::handshake(TokioIo::new(io)).await?; 88 | 89 | let connection_task = tokio::task::spawn(async move { 90 | if let Err(err) = connection.await { 91 | if err.is_canceled() { 92 | return; 93 | } 94 | 95 | debug!("Connection to download GeoIP failed: {:?}", err); 96 | } 97 | }); 98 | 99 | let req = Request::builder() 100 | .method(Method::GET) 101 | .uri(format!("https://download.maxmind.com{}", path)) 102 | .header("Host", "download.maxmind.com") 103 | .header("User-Agent", "dandelion/1.0") 104 | .header("Connection", "close") 105 | .body(Empty::::new())?; 106 | 107 | let response = request_sender.send_request(req).await?; 108 | 109 | if response.status() != 200 { 110 | return Err(anyhow::anyhow!( 111 | "HTTP request failed: {}", 112 | response.status() 113 | )); 114 | } 115 | 116 | let body = response.collect().await?.to_bytes(); 117 | 118 | // Force abort the connection task since we're done with the response 119 | connection_task.abort(); 120 | 121 | let tar = GzDecoder::new(body.as_ref()); 122 | let mut archive = Archive::new(tar); 123 | archive.unpack(&dir)?; 124 | 125 | // The file is extracted to a folder with the release data of 126 | // the database 127 | 128 | // We first try to find the folder 129 | let db_temp_dir = read_dir(&dir)? 130 | .filter_map(|e| e.ok()) 131 | .find(|e| e.path().is_dir()) 132 | .ok_or_else(|| { 133 | anyhow::anyhow!( 134 | "Failed to find the downloaded file. Maxmind changed the archive structure?" 135 | ) 136 | })? 
137 | .path(); 138 | 139 | create_dir_all(db_path.parent().unwrap())?; 140 | 141 | std::fs::copy(db_temp_dir.join("GeoLite2-Country.mmdb"), &db_path)?; 142 | info!("Downloaded GeoLite2 database"); 143 | 144 | Ok(Self { 145 | reader: Arc::new(Reader::open_mmap(&db_path)?), 146 | }) 147 | } 148 | 149 | // We don't differentiate any error here, just return an empty string. 150 | // User should not care about the internal implementation of maxminddb. 151 | #[rune::function] 152 | pub fn lookup(&self, ip: &str) -> String { 153 | let ip: IpAddr = match ip.parse() { 154 | Ok(ip) => ip, 155 | Err(_) => return "".to_owned(), 156 | }; 157 | 158 | match self.reader.lookup::(ip) { 159 | Ok(country) => country 160 | .and_then(|c| c.country) 161 | .and_then(|c| c.iso_code) 162 | .unwrap_or(""), 163 | Err(_) => "", 164 | } 165 | .to_owned() 166 | } 167 | 168 | pub fn module() -> Result { 169 | let mut module = rune::Module::new(); 170 | 171 | module.ty::()?; 172 | 173 | module.function_meta(Self::lookup)?; 174 | module.function_meta(Self::from_absolute_path)?; 175 | module.function_meta(Self::from_license)?; 176 | 177 | Ok(module) 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /core/src/config/engine/iplist.rs: -------------------------------------------------------------------------------- 1 | use std::{net::IpAddr, sync::Arc}; 2 | 3 | use crate::Result; 4 | use ipnetwork::IpNetwork; 5 | use rune::{runtime::Vec as RuneVec, Any, FromValue, Module}; 6 | 7 | use crate::config::rune::create_wrapper; 8 | 9 | create_wrapper!(IpNetworkSetWrapper, Arc>); 10 | 11 | #[rune::function] 12 | pub fn new_iplist(ips: RuneVec) -> Result { 13 | Ok(Arc::new( 14 | ips.into_iter() 15 | .map(|ip| anyhow::Ok(String::from_value(ip)?.parse()?)) 16 | .try_fold(Vec::new(), |mut ips, ip| { 17 | ips.push(ip?); 18 | anyhow::Ok(ips) 19 | })?, 20 | ) 21 | .into()) 22 | } 23 | 24 | impl IpNetworkSetWrapper { 25 | fn contains_impl(&self, ip: &str) -> Result 
{ 26 | let ip: IpAddr = ip.parse()?; 27 | 28 | Ok(self.inner().iter().any(|network| network.contains(ip))) 29 | } 30 | 31 | #[rune::function] 32 | pub fn contains(&self, ip: &str) -> Result { 33 | self.contains_impl(ip) 34 | } 35 | 36 | #[rune::function] 37 | pub fn contains_any(&self, ips: &RuneVec) -> Result { 38 | for ip in ips { 39 | let result = self.contains_impl(ip.borrow_string_ref()?.as_ref())?; 40 | 41 | if result { 42 | return Ok(true); 43 | } 44 | } 45 | 46 | Ok(false) 47 | } 48 | 49 | pub fn module() -> Result { 50 | let mut module = Module::new(); 51 | 52 | module.ty::()?; 53 | module.function_meta(new_iplist)?; 54 | module.function_meta(Self::contains)?; 55 | module.function_meta(Self::contains_any)?; 56 | 57 | Ok(module) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /core/src/config/engine/mod.rs: -------------------------------------------------------------------------------- 1 | mod connect; 2 | mod geoip; 3 | mod iplist; 4 | mod resolver; 5 | 6 | use std::{collections::HashMap, net::SocketAddr, sync::Arc}; 7 | 8 | use crate::{ 9 | core::{ 10 | acceptor::{http, socks5}, 11 | endpoint::Endpoint, 12 | io::Io, 13 | }, 14 | Result, 15 | }; 16 | use anyhow::Context as AnyhowContext; 17 | use futures::{future::select_all, Future, FutureExt}; 18 | use rune::{ 19 | runtime::{RuntimeContext, VmSendExecution}, 20 | termcolor::{ColorChoice, StandardStream}, 21 | Any, Context, Diagnostics, Module, Source, Sources, Unit, Vm, 22 | }; 23 | use tokio::{ 24 | io::copy_bidirectional, 25 | net::{TcpListener, TcpStream}, 26 | }; 27 | 28 | use self::{ 29 | connect::{ConnectRequest, IoWrapper, QuicConnectionWrapper}, 30 | geoip::GeoIp, 31 | iplist::IpNetworkSetWrapper, 32 | resolver::ResolverWrapper, 33 | }; 34 | 35 | type HandlerName = String; 36 | 37 | #[derive(Debug, PartialEq)] 38 | pub enum AcceptorConfig { 39 | Socks5(SocketAddr, HandlerName), 40 | Http(SocketAddr, HandlerName), 41 | } 42 | 43 | 
#[derive(Debug, Any, Clone)] 44 | struct Cache { 45 | iplist: HashMap, 46 | geoip: Option, 47 | quic_connections: HashMap, 48 | } 49 | 50 | impl Cache { 51 | pub fn new() -> Self { 52 | Self { 53 | iplist: HashMap::new(), 54 | geoip: None, 55 | quic_connections: HashMap::new(), 56 | } 57 | } 58 | 59 | pub fn get_iplist(&self, name: &str) -> Result { 60 | self.iplist 61 | .get(name) 62 | .cloned() 63 | .ok_or_else(|| anyhow::anyhow!("iplist {} not found", name)) 64 | } 65 | 66 | pub fn get_geoip_db(&self) -> Result { 67 | self.geoip 68 | .clone() 69 | .ok_or_else(|| anyhow::anyhow!("geoip db not found")) 70 | } 71 | 72 | pub fn get_quic_connection(&self, name: &str) -> Result { 73 | self.quic_connections 74 | .get(name) 75 | .cloned() 76 | .ok_or_else(|| anyhow::anyhow!("quic connection {} not found", name)) 77 | } 78 | } 79 | 80 | impl Default for Cache { 81 | fn default() -> Self { 82 | Self::new() 83 | } 84 | } 85 | 86 | impl Cache { 87 | pub fn module() -> Result { 88 | let mut module = Module::new(); 89 | 90 | module.ty::()?; 91 | module.associated_function("try_get_iplist", Self::get_iplist)?; 92 | module.associated_function("try_get_geoip_db", Self::get_geoip_db)?; 93 | module.associated_function("try_get_quic_connection", Self::get_quic_connection)?; 94 | 95 | Ok(module) 96 | } 97 | } 98 | 99 | #[derive(Debug, Any)] 100 | struct Config { 101 | acceptors: Vec, 102 | cache: Cache, 103 | } 104 | 105 | impl Config { 106 | #[rune::function(path = Self::new)] 107 | pub fn new() -> Self { 108 | Self { 109 | acceptors: Vec::new(), 110 | cache: Cache::new(), 111 | } 112 | } 113 | 114 | #[rune::function] 115 | pub fn add_socks5_acceptor(&mut self, addr: &str, handler_name: &str) -> Result<()> { 116 | self.acceptors.push(AcceptorConfig::Socks5( 117 | addr.parse()?, 118 | handler_name.to_owned(), 119 | )); 120 | 121 | Ok(()) 122 | } 123 | 124 | #[rune::function] 125 | pub fn add_http_acceptor(&mut self, addr: &str, handler_name: &str) -> Result<()> { 126 | 
self.acceptors 127 | .push(AcceptorConfig::Http(addr.parse()?, handler_name.to_owned())); 128 | 129 | Ok(()) 130 | } 131 | 132 | #[rune::function] 133 | pub fn cache_iplist(&mut self, name: &str, iplist: IpNetworkSetWrapper) { 134 | self.cache.iplist.insert(name.to_owned(), iplist); 135 | } 136 | 137 | #[rune::function] 138 | pub fn cache_geoip_db(&mut self, db: GeoIp) { 139 | self.cache.geoip = Some(db); 140 | } 141 | 142 | #[rune::function] 143 | pub fn cache_quic_connection(&mut self, name: &str, connection: QuicConnectionWrapper) { 144 | self.cache 145 | .quic_connections 146 | .insert(name.to_owned(), connection); 147 | } 148 | } 149 | 150 | impl Config { 151 | fn module() -> Result { 152 | let mut module = Module::new(); 153 | 154 | module.ty::()?; 155 | module.function_meta(Self::new)?; 156 | module.function_meta(Self::add_socks5_acceptor)?; 157 | module.function_meta(Self::add_http_acceptor)?; 158 | module.function_meta(Self::cache_iplist)?; 159 | module.function_meta(Self::cache_geoip_db)?; 160 | module.function_meta(Self::cache_quic_connection)?; 161 | 162 | Ok(module) 163 | } 164 | } 165 | 166 | pub struct Engine { 167 | context: Arc, 168 | unit: Arc, 169 | acceptors: Vec, 170 | cache: Cache, 171 | } 172 | 173 | impl Engine { 174 | pub async fn load_config(name: impl AsRef, code: impl AsRef) -> Result { 175 | let mut sources = Sources::new(); 176 | sources.insert(Source::new(name, code)?)?; 177 | 178 | let mut context = Context::with_default_modules()?; 179 | context.install(Config::module()?)?; 180 | context.install(ConnectRequest::module()?)?; 181 | context.install(ResolverWrapper::module()?)?; 182 | context.install(IpNetworkSetWrapper::module()?)?; 183 | context.install(Cache::module()?)?; 184 | context.install(GeoIp::module()?)?; 185 | 186 | let mut diagnostics = Diagnostics::new(); 187 | let result = rune::prepare(&mut sources) 188 | .with_context(&context) 189 | .with_diagnostics(&mut diagnostics) 190 | .build(); 191 | 192 | if 
!diagnostics.is_empty() { 193 | let mut writer = StandardStream::stderr(ColorChoice::Always); 194 | diagnostics.emit(&mut writer, &sources)?; 195 | } 196 | 197 | let context = Arc::new(context.runtime()?); 198 | let unit = Arc::new(result?); 199 | 200 | let mut vm = Vm::new(context.clone(), unit.clone()); 201 | 202 | log::info!("Configuring rule engine..."); 203 | 204 | let config: Config = 205 | rune::from_value::>(vm.async_call(["config"], ()).await?)??; 206 | log::info!("Done"); 207 | 208 | Ok(Self { 209 | context, 210 | unit, 211 | acceptors: config.acceptors, 212 | cache: config.cache, 213 | }) 214 | } 215 | 216 | fn vm(&self) -> Vm { 217 | Vm::new(self.context.clone(), self.unit.clone()) 218 | } 219 | 220 | pub fn create_handler_execution( 221 | &self, 222 | name: impl AsRef, 223 | endpoint: Endpoint, 224 | ) -> Result { 225 | Ok(self.vm().send_execute( 226 | [name.as_ref()], 227 | (ConnectRequest::new(endpoint), self.cache.clone()), 228 | )?) 229 | } 230 | 231 | pub async fn run(self) -> Result<()> { 232 | let self_ptr = Arc::new(self); 233 | 234 | select_all(self_ptr.clone().acceptors.iter().map(|c| { 235 | match c { 236 | AcceptorConfig::Socks5(addr, handler) => handle_acceptors( 237 | addr, 238 | socks5::handshake, 239 | self_ptr.clone(), 240 | handler.to_owned(), 241 | ) 242 | .boxed(), 243 | AcceptorConfig::Http(addr, handler) => { 244 | handle_acceptors(addr, http::handshake, self_ptr.clone(), handler.to_owned()) 245 | .boxed() 246 | } 247 | } 248 | })) 249 | .await 250 | .0 251 | } 252 | } 253 | 254 | pub async fn handle_acceptors< 255 | F: Future> + Send)>> 256 | + 'static 257 | + Send, 258 | >( 259 | addr: &SocketAddr, 260 | handshake: fn(TcpStream) -> F, 261 | engine: Arc, 262 | eval_fn: String, 263 | ) -> Result<()> { 264 | let listener = TcpListener::bind(addr).await?; 265 | 266 | loop { 267 | let io = listener.accept().await?.0; 268 | 269 | let engine = engine.clone(); 270 | let eval_fn = eval_fn.clone(); 271 | 272 | tokio::task::spawn(async 
move { 273 | if let Err(e) = async move { 274 | let (endpoint, fut) = handshake(io).await?; 275 | 276 | let endpoint_cloned = endpoint.clone(); 277 | async move { 278 | let execution = engine.create_handler_execution(eval_fn, endpoint)?; 279 | 280 | let mut remote = rune::from_value::>( 281 | execution 282 | .async_complete() 283 | .await 284 | // a VmResult here 285 | .into_result()?, // Unwrap it gives return value of the call, 286 | // the return value is of type `Value`, but it's actually a `Result`. 287 | )?? 288 | .into_inner(); 289 | 290 | let mut local = fut.await?; 291 | 292 | copy_bidirectional(&mut local, &mut remote) 293 | .await 294 | .context("Error happened when forwarding data")?; 295 | 296 | anyhow::Ok(()) 297 | } 298 | .await 299 | .with_context(|| format!("target endpoint {}", endpoint_cloned)) 300 | } 301 | .await 302 | { 303 | tracing::error!("{:?}", e) 304 | } 305 | }); 306 | } 307 | } 308 | 309 | #[cfg(test)] 310 | mod tests { 311 | 312 | use super::*; 313 | 314 | #[tokio::test] 315 | async fn test_add_acceptor() -> Result<()> { 316 | let engine = Engine::load_config( 317 | "config", 318 | r#" 319 | pub async fn config() { 320 | let config = Config::new(); 321 | 322 | config.add_socks5_acceptor("127.0.0.1:8080", "handler")?; 323 | config.add_http_acceptor("127.0.0.1:8081", "handler")?; 324 | 325 | Ok(config) 326 | } 327 | "#, 328 | ) 329 | .await?; 330 | 331 | assert_eq!( 332 | engine.acceptors, 333 | vec![ 334 | AcceptorConfig::Socks5("127.0.0.1:8080".parse().unwrap(), "handler".to_owned()), 335 | AcceptorConfig::Http("127.0.0.1:8081".parse().unwrap(), "handler".to_owned()) 336 | ] 337 | ); 338 | 339 | Ok(()) 340 | } 341 | } 342 | -------------------------------------------------------------------------------- /core/src/config/engine/resolver.rs: -------------------------------------------------------------------------------- 1 | use std::{net::SocketAddr, sync::Arc, time::Duration}; 2 | 3 | use crate::{ 4 | 
core::resolver::{hickory::HickoryResolver, system::SystemResolver, Resolver}, 5 | Result, 6 | }; 7 | use cached::{proc_macro::cached, Cached}; 8 | use hickory_proto::xfer::Protocol; 9 | use hickory_resolver::config::NameServerConfig; 10 | use itertools::Itertools; 11 | use rune::{ 12 | runtime::{Ref, Vec as RuneVec}, 13 | Any, FromValue, Module, ToValue, Value, 14 | }; 15 | 16 | use crate::config::rune::create_wrapper; 17 | 18 | create_wrapper!(ResolverWrapper, Resolver, Arc); 19 | 20 | impl Clone for ResolverWrapper { 21 | fn clone(&self) -> Self { 22 | Self(self.0.clone()) 23 | } 24 | } 25 | 26 | #[rune::function] 27 | fn create_system_resolver() -> Result { 28 | Ok(create_system_resolver_impl()?.into()) 29 | } 30 | 31 | #[cached(name = "SYSTEM_RESOLVER", result = true)] 32 | fn create_system_resolver_impl() -> Result> { 33 | Ok(Arc::new(SystemResolver::default())) 34 | } 35 | 36 | #[rune::function] 37 | fn create_udp_resolver(addrs: RuneVec) -> Result { 38 | Ok(create_udp_resolver_impl( 39 | addrs 40 | .into_iter() 41 | .map(|addr| anyhow::Ok(String::from_value(addr)?.parse::()?)) 42 | .try_collect()?, 43 | )? 
44 | .into()) 45 | } 46 | 47 | #[cached(name = "UDP_RESOLVER", result = true)] 48 | fn create_udp_resolver_impl(addrs: Vec) -> Result> { 49 | Ok(Arc::new(HickoryResolver::new( 50 | addrs 51 | .into_iter() 52 | .map(|s| NameServerConfig::new(s, Protocol::Udp)) 53 | .collect(), 54 | Duration::from_secs(5), 55 | )?)) 56 | } 57 | 58 | impl ResolverWrapper { 59 | pub fn module() -> Result { 60 | let mut module = Module::new(); 61 | 62 | module.ty::()?; 63 | 64 | module.function_meta(create_system_resolver)?; 65 | module.function_meta(create_udp_resolver)?; 66 | 67 | module.function_meta(Self::lookup)?; 68 | module.function_meta(Self::lookup_ipv4)?; 69 | module.function_meta(Self::lookup_ipv6)?; 70 | 71 | Ok(module) 72 | } 73 | 74 | pub fn clear_cache() { 75 | UDP_RESOLVER 76 | .lock() 77 | .expect("Failed to clear cache for udp resolver") 78 | .cache_clear(); 79 | SYSTEM_RESOLVER 80 | .lock() 81 | .expect("Failed to clear cache for system resolver") 82 | .cache_clear(); 83 | } 84 | 85 | // See https://docs.rs/rune/latest/rune/struct.Module.html#method.function_meta 86 | // for why use `this` instead of `self` 87 | #[rune::function(instance, path = Self::lookup)] 88 | async fn lookup(this: Ref, hostname: Ref) -> Result { 89 | Ok(this 90 | .inner() 91 | .lookup_ip(hostname.as_ref()) 92 | .await? 93 | .into_iter() 94 | .map(|ip| ip.to_string().to_value()) 95 | .collect::, _>>()? 96 | .try_into()?) 97 | } 98 | 99 | #[rune::function(instance, path = Self::lookup_ipv4)] 100 | async fn lookup_ipv4(this: Ref, hostname: Ref) -> Result { 101 | Ok(this 102 | .inner() 103 | .lookup_ipv4(hostname.as_ref()) 104 | .await? 105 | .into_iter() 106 | .map(|ip| ip.to_string().to_value()) 107 | .collect::, _>>()? 108 | .try_into()?) 109 | } 110 | 111 | #[rune::function(instance, path = Self::lookup_ipv6)] 112 | async fn lookup_ipv6(this: Ref, hostname: Ref) -> Result { 113 | Ok(this 114 | .inner() 115 | .lookup_ipv6(hostname.as_ref()) 116 | .await? 
/// Generate a newtype wrapper so a foreign type can be exposed to rune.
///
/// Two forms:
/// - `create_wrapper!(Name, Inner)` wraps a concrete type directly and
///   derives `Clone` alongside `Any`/`Debug`.
/// - `create_wrapper!(Name, Trait, Box-like)` wraps a trait object behind
///   the given smart pointer (e.g. `Arc`) and provides a blanket `From`
///   for any implementor of the trait. No `Clone` is derived here; types
///   that need it (e.g. `ResolverWrapper`) implement it by hand.
///
/// NOTE(review): generic parameters were stripped by the extraction; the
/// `<dyn $trait>` / `<T: $trait + 'static>` spellings below are the
/// obvious reconstruction — confirm against the original file.
macro_rules! create_wrapper {
    ($name:ident, $inner:ty) => {
        #[derive(Any, Clone, Debug)]
        pub struct $name($inner);

        impl $name {
            // Consume the wrapper, yielding the wrapped value.
            pub fn into_inner(self) -> $inner {
                self.0
            }

            // Borrow the wrapped value.
            pub fn inner(&self) -> &$inner {
                &self.0
            }
        }

        impl From<$inner> for $name {
            fn from(t: $inner) -> Self {
                Self(t)
            }
        }
    };
    ($name:ident, $trait:ident, $box:ident) => {
        #[derive(Any, Debug)]
        pub struct $name($box<dyn $trait>);

        impl $name {
            // Consume the wrapper, yielding the owning pointer.
            pub fn into_inner(self) -> $box<dyn $trait> {
                self.0
            }

            // Borrow the owning pointer.
            pub fn inner(&self) -> &$box<dyn $trait> {
                &self.0
            }
        }

        // Any concrete implementor of the trait converts by being moved
        // behind the smart pointer.
        impl<T: $trait + 'static> From<T> for $name {
            fn from(t: T) -> Self {
                Self($box::new(t))
            }
        }
    };
}
service::service_fn, 13 | }; 14 | use hyper_util::rt::TokioIo; 15 | use std::{str::FromStr, sync::Arc}; 16 | use tokio::{ 17 | io::duplex, 18 | sync::{ 19 | oneshot::{channel, Receiver, Sender}, 20 | Mutex, 21 | }, 22 | }; 23 | 24 | enum State { 25 | NotConnected(Option), 26 | Connected((Endpoint, SendRequest)), 27 | } 28 | 29 | struct ConnectSignal { 30 | endpoint_tx: Sender<(bool, Endpoint)>, 31 | done_rx: Receiver>>, 32 | } 33 | 34 | fn transform_proxy_request(mut request: Request) -> Option> { 35 | if !request.headers().contains_key(HOST) { 36 | let host = request.uri().authority()?.host().parse().ok()?; 37 | request.headers_mut().insert(HOST, host); 38 | } 39 | 40 | *request.uri_mut() = request.uri().path_and_query()?.as_str().parse().ok()?; 41 | 42 | request.headers_mut().remove(PROXY_AUTHENTICATE); 43 | request.headers_mut().remove(PROXY_AUTHORIZATION); 44 | 45 | // Map Proxy-Connection to Connection if necessary 46 | if let Some(c) = request.headers_mut().remove("Proxy-Connection") { 47 | request.headers_mut().entry(CONNECTION).or_insert(c); 48 | } 49 | 50 | Some(request) 51 | } 52 | 53 | async fn handler( 54 | request: Request, 55 | state: Arc>, 56 | ) -> Result>>> { 57 | let mut state = state.lock().await; 58 | 59 | if matches!(request.method(), &Method::CONNECT) { 60 | if let State::NotConnected(signal) = &mut *state { 61 | if let Some(signal) = signal.take() { 62 | signal 63 | .endpoint_tx 64 | .send((true, Endpoint::from_str(&request.uri().to_string())?)) 65 | .expect("the other side should not be released"); 66 | 67 | signal 68 | .done_rx 69 | .await 70 | .expect("the done signal should be sent before polling the connection"); 71 | 72 | return Ok(Response::new(Either::Right(Empty::new()))); 73 | } 74 | } 75 | bail!("The CONNECT method can only be send in the first header") 76 | } else { 77 | match &mut *state { 78 | State::NotConnected(signal) => match signal.take() { 79 | Some(signal) => { 80 | let host = request.uri().host().ok_or_else(|| { 81 | 
/// Accept an inbound HTTP proxy connection and extract its target endpoint.
///
/// Serves the connection with hyper just long enough for `handler` to see
/// the first request, which reports `(is_connect, endpoint)` back over a
/// oneshot channel. Returns the endpoint plus a deferred future that, when
/// awaited, finishes the handshake and yields the byte stream to splice
/// with the remote side:
/// - CONNECT: hand back the raw underlying socket once hyper is done with
///   the header (`without_shutdown` lets us reclaim it).
/// - plain requests: bridge via an in-memory duplex pipe; a hyper client
///   on our end replays each proxied request to the returned stream.
pub async fn handshake(
    io: impl Io,
) -> Result<(Endpoint, impl Future<Output = Result<Box<dyn Io>>> + Send)> {
    // endpoint_tx/rx: handler -> us, first request's target.
    // done_tx/rx: us -> handler, signals the tunnel is ready (and carries
    // the client-side request sender for the non-CONNECT case).
    let (endpoint_tx, endpoint_rx) = channel();
    let (done_tx, done_rx) = channel();

    let state = Arc::new(Mutex::new(State::NotConnected(Some(ConnectSignal {
        endpoint_tx,
        done_rx,
    }))));

    let mut conn = Builder::new()
        .serve_connection(
            TokioIo::new(io),
            service_fn(move |req| {
                {
                    let state = state.clone();
                    handler(req, state)
                }
                .boxed()
            }),
        )
        // Keep ownership of the socket so CONNECT can reclaim it below.
        .without_shutdown();

    // Drive the server only until the handler reports the first request's
    // endpoint; if the connection ends first, there was no request at all.
    let endpoint = tokio::select! {
        _ = &mut conn => {
            // Connection terminated before getting first header. Close it.
            bail!("No HTTP request received.");
        }
        result = endpoint_rx => {
            match result {
                Ok(endpoint) => endpoint,
                // The sender lives in ConnectSignal, which is only dropped
                // after sending; it cannot be cancelled before that.
                Err(_) => unreachable!(),
            }
        }
    };

    // endpoint.0 is the is_connect flag set by the handler.
    if endpoint.0 {
        Ok((
            endpoint.1,
            async move {
                // Unblock the handler so it can answer 200 to CONNECT.
                done_tx
                    .send(None)
                    .expect("bug: the done signal receiver should not be deallocated");

                // Wait for hyper to finish the exchange, then take the
                // socket back for raw tunnelling.
                let part = conn.await?;

                let io: Box<dyn Io> = Box::new(part.io.into_inner());
                Ok(io)
            }
            .boxed(),
        ))
    } else {
        Ok((
            endpoint.1,
            async move {
                // 64KB
                let (s1, s2) = duplex(65536);

                // Client half that replays proxied requests into s1; the
                // caller reads/writes the other end (s2).
                let (request_sender, connection) =
                    hyper::client::conn::http1::handshake(TokioIo::new(s1)).await?;

                done_tx
                    .send(Some(request_sender))
                    .expect("bug: the done signal receiver should not be deallocated");

                // We don't really care the error from here since it will drop the connection.
                // We will then read the EOF from the other side.
                tokio::task::spawn(conn);
                tokio::task::spawn(connection);

                let io: Box<dyn Io> = Box::new(s2);
                Ok(io)
            }
            .boxed(),
        ))
    }
}
/// Perform the server side of a SOCKS5 (RFC 1928) handshake on `io`.
///
/// Only the NO AUTHENTICATION method and the CONNECT command are
/// supported. Returns the requested target endpoint together with a
/// deferred future that writes the success reply and hands the stream
/// back — the reply is delayed so the caller can first decide whether
/// the connection will actually be established.
pub async fn handshake(
    mut io: impl Io,
) -> Result<(Endpoint, impl Future<Output = Result<impl Io>>)> {
    // Read hello
    let mut buf = [0; 2];
    io.read_exact(&mut buf).await?;

    ensure!(buf[0] == 5, "Unsupported socks version: {}", buf[0]);

    ensure!(
        buf[1] != 0,
        "Invalid socks5 auth method count, should not be 0"
    );

    // Read requested methods
    let mut buf = vec![0; buf[1].into()];
    io.read_exact(&mut buf).await?;

    // Check if there is no auth requested since that's the only one we support
    ensure!(
        buf.contains(&0),
        "Only no auth is supported, but it's not requested in handshake"
    );

    // Send back the method we support.
    let buf: [u8; 2] = [5, 0];
    io.write_all(&buf).await?;

    // Read requested endpoint: VER CMD RSV ATYP, then the address body.
    let mut buf = [0; 4];
    io.read_exact(&mut buf).await?;

    ensure!(buf[0] == 5, "Unsupported socks version: {}", buf[0]);

    ensure!(
        buf[1] == 1,
        "Invalid socks5 command: {}, only 1 is supported",
        buf[1]
    );

    enum IpOrDomain {
        Ip(IpAddr),
        Domain(String),
    }

    let request_type = buf[3];

    // ATYP: 1 = IPv4, 3 = length-prefixed domain name, 4 = IPv6.
    let ip_or_domain = match request_type {
        1 => {
            let mut buf = [0; 4];
            io.read_exact(&mut buf).await?;
            IpOrDomain::Ip(IpAddr::from(buf))
        }
        3 => {
            let len: usize = io.read_u8().await?.into();
            let mut buf = vec![0; len];
            io.read_exact(&mut buf).await?;
            let domain = String::from_utf8(buf)
                .context("The socks5 client is not sending a valid domain")?;
            IpOrDomain::Domain(domain)
        }
        4 => {
            let mut buf = [0; 16];
            io.read_exact(&mut buf).await?;
            IpOrDomain::Ip(IpAddr::from(buf))
        }
        t => bail!("Unsupported address type {}", t),
    };

    // Port follows the address, big-endian.
    let port = io.read_u16().await?;

    let endpoint = match ip_or_domain {
        IpOrDomain::Domain(d) => Endpoint::new_from_domain(&d, port),
        IpOrDomain::Ip(ip) => Endpoint::new_from_addr(SocketAddr::new(ip, port)),
    };

    // Success reply with an all-zero bound address; the ATYP mirrors the
    // request family (domain requests are answered with an IPv4 ATYP).
    let response: &[u8] = match request_type {
        1 | 3 => &[5, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        4 => &[
            5, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ],
        // Any other ATYP already bailed above.
        _ => unreachable!(),
    };

    Ok((endpoint, async move {
        io.write_all(response).await?;
        Ok(io)
    }))
}
connect(endpoint: &Endpoint) -> Result { 6 | bail!("Connection to {} is blocked", endpoint); 7 | } 8 | -------------------------------------------------------------------------------- /core/src/core/connector/http.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | core::{endpoint::Endpoint, io::Io}, 3 | Result, 4 | }; 5 | use anyhow::{bail, Context}; 6 | use bytes::BytesMut; 7 | use httparse::{Response, EMPTY_HEADER}; 8 | use tokio::io::{AsyncReadExt, AsyncWriteExt}; 9 | use tracing::debug; 10 | 11 | pub async fn connect(endpoint: &Endpoint, mut nexthop: impl Io) -> Result { 12 | debug!("Begin HTTP CONNECT handshake"); 13 | 14 | nexthop 15 | .write_all( 16 | format!( 17 | "CONNECT {} HTTP/1.1\r\nHost: {}\r\n\r\n", 18 | endpoint, endpoint 19 | ) 20 | .as_bytes(), 21 | ) 22 | .await 23 | .with_context(|| format!("Failed to send CONNECT request to connect to {}", endpoint))?; 24 | 25 | // We should not have a huge response 26 | let mut buf = BytesMut::with_capacity(4196); 27 | 28 | while nexthop 29 | .read_buf(&mut buf) 30 | .await 31 | .context("Failed to read CONNECT response")? 32 | != 0 33 | { 34 | let mut headers = [EMPTY_HEADER; 64]; 35 | let mut res = Response::new(&mut headers); 36 | 37 | if res.parse(&buf)?.is_complete() { 38 | if res.code == Some(200) { 39 | break; 40 | } else { 41 | bail!( 42 | "Failed to CONNECT to {}, got error response {}", 43 | endpoint, 44 | std::str::from_utf8(&buf)? 
45 | ) 46 | } 47 | } 48 | } 49 | 50 | debug!("Finished HTTP CONNECT handshake"); 51 | 52 | Ok(nexthop) 53 | } 54 | -------------------------------------------------------------------------------- /core/src/core/connector/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod block; 2 | pub mod http; 3 | pub mod quic; 4 | pub mod simplex; 5 | pub mod socks5; 6 | pub mod speed; 7 | pub mod tcp; 8 | pub mod tls; 9 | -------------------------------------------------------------------------------- /core/src/core/connector/quic.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | core::{ 3 | endpoint::Endpoint, 4 | quic::{client::create_quic_connection as client_connect, QuicStream}, 5 | resolver::Resolver, 6 | }, 7 | Result, 8 | }; 9 | use quinn::Connection; 10 | 11 | #[derive(Debug)] 12 | pub struct QuicConnection { 13 | inner: Connection, 14 | } 15 | 16 | pub async fn create_quic_connection( 17 | server: Endpoint, 18 | resolver: R, 19 | apln_protocols: Vec>, 20 | ) -> Result { 21 | Ok(QuicConnection { 22 | inner: client_connect(server, resolver, apln_protocols).await?, 23 | }) 24 | } 25 | 26 | pub async fn connect(connection: &QuicConnection) -> Result { 27 | let (send, recv) = connection.inner.open_bi().await?; 28 | 29 | Ok(QuicStream::new(send, recv)) 30 | } 31 | -------------------------------------------------------------------------------- /core/src/core/connector/simplex.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | core::{ 3 | endpoint::Endpoint, 4 | io::Io, 5 | simplex::{client::connect as simplex_connect, Config}, 6 | }, 7 | Result, 8 | }; 9 | 10 | pub async fn connect(endpoint: &Endpoint, config: &Config, nexthop: impl Io) -> Result { 11 | let s = simplex_connect(nexthop, endpoint, config).await?; 12 | 13 | Ok(s) 14 | } 15 | 
--------------------------------------------------------------------------------
/core/src/core/connector/socks5.rs:
--------------------------------------------------------------------------------
//! Outbound SOCKS5 connector: performs the client side of the SOCKS5
//! handshake (RFC 1928) over an already-established `nexthop` stream.

use crate::{
    core::{endpoint::Endpoint, io::Io},
    Result,
};
use anyhow::{bail, ensure, Context};
use tokio::io::{AsyncReadExt, AsyncWriteExt};

/// Establish a SOCKS5 tunnel to `endpoint` through `nexthop`.
///
/// Only the "no authentication" method (0x00) and the CONNECT command are
/// used; the endpoint is always sent as a DOMAIN (0x03) address. The server's
/// bound-address reply is read and discarded, and the same stream is returned
/// ready to carry the proxied payload.
pub async fn connect(endpoint: &Endpoint, mut nexthop: impl Io) -> Result<impl Io> {
    // Greeting: version 5, one auth method offered: 0x00 (no auth).
    nexthop.write_all(&[5, 1, 0]).await?;

    // Method-selection reply: [version, chosen method].
    let mut buf = [0; 2];
    nexthop.read_exact(&mut buf).await?;

    ensure!(buf[0] == 5, "Unsupported socks version: {}", buf[0]);
    ensure!(
        buf[1] == 0,
        "Server asked for auth method {} we don't support",
        buf[1]
    );

    // CONNECT request. The hostname must fit the protocol's single length
    // byte, hence the checked narrowing to u8.
    let len = endpoint.hostname().len().try_into().with_context(|| {
        "The socks5 protocol cannot support domain longer than 255 bytes."
    })?;
    nexthop.write_all(&[5, 1, 0, 3, len]).await?;
    nexthop.write_all(endpoint.hostname().as_bytes()).await?;
    nexthop.write_all(&endpoint.port().to_be_bytes()).await?;

    // Reply header: [version, status, reserved, address type].
    let mut buf = [0; 4];
    nexthop.read_exact(&mut buf).await?;
    ensure!(buf[0] == 5, "Unsupported socks version: {}", buf[0]);
    ensure!(
        buf[1] == 0,
        "Socks5 connection failed with status {}",
        buf[1]
    );
    ensure!(buf[2] == 0, "Not recognized reserved field");
    // Drain the server-reported bound address + port; we don't use it.
    match buf[3] {
        1 => {
            // IPv4 (4 bytes) + port (2 bytes).
            let mut buf = [0; 6];
            nexthop.read_exact(&mut buf).await?;
        }
        3 => {
            // Domain: length-prefixed name + port (2 bytes).
            let len: usize = nexthop.read_u8().await?.into();
            let mut buf = vec![0; len + 2];
            nexthop.read_exact(&mut buf).await?;
        }
        4 => {
            // IPv6 (16 bytes) + port (2 bytes).
            let mut buf = [0; 18];
            nexthop.read_exact(&mut buf).await?;
        }
        _ => {
            bail!("Not recognized address type {}", buf[3]);
        }
    }

    Ok(nexthop)
}
--------------------------------------------------------------------------------
/core/src/core/connector/speed.rs:
--------------------------------------------------------------------------------
use crate::core::{endpoint::Endpoint, io::Io};
use anyhow::Result;
use futures::{
    future::{select_ok, FutureExt},
    Future,
};
use std::time::Duration;
use tokio::time::sleep;

/// Race a set of connectors against each other, giving each one a configured
/// head-start delay, and return the first stream that connects successfully.
///
/// `select_ok` resolves with the first `Ok` attempt and fails only if every
/// connector fails.
pub async fn connect<
    T: Io,
    F: Future<Output = Result<T>> + Send,
    C: (FnOnce(&Endpoint) -> F) + Send,
>(
    connectors: Vec<(Duration, C)>,
    endpoint: &Endpoint,
) -> Result<T> {
    select_ok(connectors.into_iter().map(|(delay, dial)| {
        async move {
            // Stagger this attempt by its configured delay.
            sleep(delay).await;

            dial(endpoint).await
        }
        .boxed()
    }))
    .await
    .map(|(stream, _remaining)| stream)
}
--------------------------------------------------------------------------------
/core/src/core/connector/tcp.rs:
--------------------------------------------------------------------------------
use crate::{
    core::{endpoint::Endpoint, resolver::Resolver},
    Result,
};
use futures::{future::FusedFuture, Future, FutureExt, TryFutureExt};
use itertools::Itertools;
use socket2::{Socket, TcpKeepalive};
use std::{
    net::{IpAddr, Ipv4Addr, Ipv6Addr},
    ops::Add,
    pin::Pin,
    time::{Duration, Instant},
    vec::IntoIter,
};
use tokio::{
    net::TcpStream,
    time::{sleep_until, Sleep},
};

// TODO: Test the connector and add support for select IP versions.
21 | 22 | pub async fn connect(endpoint: &Endpoint, resolver: impl Resolver) -> Result { 23 | match endpoint { 24 | Endpoint::Addr(addr) => Ok(TcpStream::connect(addr).await?), 25 | Endpoint::Domain(host, port) => Ok(HappyEyeballConnector::new(&resolver, host, *port) 26 | .await 27 | .map(|s| { 28 | let s: Socket = s.into_std().unwrap().into(); 29 | let _ = s.set_tcp_keepalive( 30 | &TcpKeepalive::new() 31 | .with_time(Duration::from_secs(60)) 32 | .with_interval(Duration::from_secs(60)), 33 | ); 34 | let s: std::net::TcpStream = s.into(); 35 | TcpStream::from_std(s).unwrap() 36 | })?), 37 | } 38 | } 39 | 40 | // Implementing https://datatracker.ietf.org/doc/html/rfc8305 41 | // 42 | // This is actually super complicated to implement so it's very unfortunate that 43 | // rust std does not provide support for this. 44 | // 45 | // Anyway, this implementation is implemented based on RFC8305 without 46 | // preference for IPv6, i.e., we will start connecting when we get the first DNS 47 | // response instead of waiting for AAAA result. Given the current status of IPv6 48 | // connectivity, it may be better if we prefer IPv4 connection. 
49 | #[pin_project::pin_project] 50 | struct HappyEyeballConnector<'a> { 51 | ipv4_future: Pin>> + Send + 'a>>, 52 | ipv6_future: Pin>> + Send + 'a>>, 53 | ips: IntoIter, 54 | ip_count: usize, 55 | connections: Vec> + Send + 'static>>>, 56 | next_connection_timer: Pin>, 57 | host: &'a str, 58 | port: u16, 59 | } 60 | 61 | impl<'a> HappyEyeballConnector<'a> { 62 | fn new(resolver: &'a impl Resolver, host: &'a str, port: u16) -> Self { 63 | Self { 64 | ipv4_future: Box::pin(resolver.lookup_ipv4(host).fuse()), 65 | ipv6_future: Box::pin(resolver.lookup_ipv6(host).fuse()), 66 | ips: Vec::new().into_iter(), 67 | ip_count: 0, 68 | connections: Vec::new(), 69 | next_connection_timer: Box::pin(sleep_until(Instant::now().into())), 70 | host, 71 | port, 72 | } 73 | } 74 | 75 | fn is_resolving(&self) -> bool { 76 | !(self.ipv4_future.is_terminated() && self.ipv6_future.is_terminated()) 77 | } 78 | } 79 | 80 | const CONNECTION_ATTEMP_DELAY: Duration = Duration::from_millis(250); 81 | 82 | impl Future for HappyEyeballConnector<'_> { 83 | type Output = Result; 84 | 85 | fn poll( 86 | mut self: std::pin::Pin<&mut Self>, 87 | cx: &mut std::task::Context<'_>, 88 | ) -> std::task::Poll { 89 | { 90 | // Only need this for swapping IP iterators 91 | let this = self.as_mut().project(); 92 | // First we poll the dns result. It doesn't matter in what we order we 93 | // poll it since we are doing it at the same time. 
94 | if !this.ipv4_future.is_terminated() { 95 | if let std::task::Poll::Ready(Ok(addrs)) = this.ipv4_future.poll_unpin(cx) { 96 | *this.ip_count += addrs.len(); 97 | *this.ips = this 98 | .ips 99 | .interleave(addrs.into_iter().map(Into::into)) 100 | .collect_vec() 101 | .into_iter(); 102 | }; 103 | // Ignore error 104 | } 105 | 106 | if !this.ipv6_future.is_terminated() { 107 | if let std::task::Poll::Ready(Ok(addrs)) = this.ipv6_future.poll_unpin(cx) { 108 | *this.ip_count += addrs.len(); 109 | *this.ips = this 110 | .ips 111 | .interleave(addrs.into_iter().map(Into::into)) 112 | .collect_vec() 113 | .into_iter(); 114 | }; 115 | // Ignore error 116 | } 117 | } 118 | 119 | if !self.is_resolving() && self.ip_count == 0 { 120 | return std::task::Poll::Ready(Err(anyhow::anyhow!( 121 | "Failed to resolve domain {}", 122 | self.host 123 | ))); 124 | } 125 | 126 | // Now we poll all ongoing connections 127 | let (has_pending, has_error, maybe_stream) = 128 | self.connections 129 | .iter_mut() 130 | .fold((false, false, None), |state, c| { 131 | if state.2.is_some() { 132 | return state; 133 | } 134 | 135 | if c.is_terminated() { 136 | return state; 137 | } 138 | 139 | match c.poll_unpin(cx) { 140 | std::task::Poll::Ready(result) => match result { 141 | Ok(stream) => (state.0, state.1, Some(stream)), 142 | Err(_) => (state.0, true, None), 143 | }, 144 | std::task::Poll::Pending => (true, state.1, None), 145 | } 146 | }); 147 | 148 | if let Some(stream) = maybe_stream { 149 | return std::task::Poll::Ready(Ok(stream)); 150 | } 151 | 152 | // Check if we should make new connection 153 | if !has_pending // No ongoing connection, create a new one now. 154 | || has_error // One connection is ended, we should start a new one now. 155 | || self.next_connection_timer.as_mut().poll(cx) == std::task::Poll::Ready(()) 156 | { 157 | // Loop until we successfully makes a connection. 
158 | loop { 159 | match self.ips.next() { 160 | Some(addr) => { 161 | let mut fut = Box::pin( 162 | TcpStream::connect((addr, self.port)) 163 | .map_err(|e| e.into()) 164 | .fuse(), 165 | ); 166 | match fut.poll_unpin(cx) { 167 | std::task::Poll::Ready(result) => match result { 168 | // This should be unreachable actually. 169 | Ok(s) => return std::task::Poll::Ready(Ok(s)), 170 | // Try next IP. 171 | Err(_) => continue, 172 | }, 173 | // Good, we initiated an ongoing connection. 174 | std::task::Poll::Pending => { 175 | self.next_connection_timer 176 | .as_mut() 177 | .reset(Instant::now().add(CONNECTION_ATTEMP_DELAY).into()); 178 | // The result should always be pending. 179 | assert_eq!( 180 | self.next_connection_timer.poll_unpin(cx), 181 | std::task::Poll::Pending 182 | ); 183 | self.connections.push(fut); 184 | break; 185 | } 186 | } 187 | } 188 | None => { 189 | if !self.is_resolving() { 190 | return std::task::Poll::Ready(Err(anyhow::anyhow!( 191 | "Failed to connect to domain {}", 192 | self.host 193 | ))); 194 | } else { 195 | break; 196 | } 197 | } 198 | } 199 | } 200 | } 201 | 202 | std::task::Poll::Pending 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /core/src/core/connector/tls.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | core::{endpoint::Endpoint, io::Io}, 3 | Result, 4 | }; 5 | use anyhow::Context; 6 | 7 | pub async fn connect(endpoint: &Endpoint, nexthop: impl Io) -> Result { 8 | let s = tokio_native_tls::TlsConnector::from( 9 | tokio_native_tls::native_tls::TlsConnector::new() 10 | .context("Failed to create TLS connector")?, 11 | ) 12 | .connect(&endpoint.hostname(), nexthop) 13 | .await 14 | .with_context(|| format!("Failed to establish a secure connection to {}", endpoint))?; 15 | 16 | Ok(s) 17 | } 18 | -------------------------------------------------------------------------------- /core/src/core/endpoint.rs: 
-------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Context}; 2 | use serde::Deserialize; 3 | use std::{fmt::Display, net::SocketAddr, str::FromStr}; 4 | 5 | #[derive(Debug, Clone, Deserialize, PartialEq, Eq)] 6 | pub enum Endpoint { 7 | Addr(SocketAddr), 8 | Domain(String, u16), 9 | } 10 | 11 | impl Endpoint { 12 | pub fn new_from_domain(domain: &str, port: u16) -> Self { 13 | Endpoint::Domain(domain.to_owned(), port) 14 | } 15 | 16 | pub fn new_from_addr(addr: SocketAddr) -> Self { 17 | Endpoint::Addr(addr) 18 | } 19 | 20 | pub fn hostname(&self) -> String { 21 | match self { 22 | Endpoint::Addr(addr) => addr.ip().to_string(), 23 | Endpoint::Domain(d, _) => d.to_owned(), 24 | } 25 | } 26 | 27 | pub fn port(&self) -> u16 { 28 | match self { 29 | Endpoint::Addr(addr) => addr.port(), 30 | Endpoint::Domain(_, port) => *port, 31 | } 32 | } 33 | } 34 | 35 | impl FromStr for Endpoint { 36 | type Err = anyhow::Error; 37 | 38 | fn from_str(value: &str) -> std::result::Result { 39 | value.parse().map(Endpoint::new_from_addr).or_else(|_| { 40 | value 41 | .rsplit_once(':') 42 | .ok_or_else(|| anyhow!("Endpoint string not valid, most likely port is missing")) 43 | .and_then(|(host, port)| { 44 | Ok(Endpoint::new_from_domain( 45 | host, 46 | port.parse().context("Failed to parse port for endpoint")?, 47 | )) 48 | }) 49 | }) 50 | } 51 | } 52 | 53 | impl Display for Endpoint { 54 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 55 | match self { 56 | Endpoint::Addr(addr) => write!(f, "{}", addr), 57 | Endpoint::Domain(d, p) => write!(f, "{}:{}", d, p), 58 | } 59 | } 60 | } 61 | 62 | #[cfg(test)] 63 | mod tests { 64 | use super::*; 65 | 66 | #[test] 67 | fn parse_from_str() { 68 | assert!(Endpoint::from_str("127.0.0.1:89").is_ok()); 69 | assert!(Endpoint::from_str("127.0.0.1").is_err()); 70 | assert!(Endpoint::from_str("google.com").is_err()); 71 | assert!(Endpoint::from_str("google.com:443").is_ok()); 
72 | assert!(Endpoint::from_str("[fe::1]:443").is_ok()); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /core/src/core/io.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | use tokio::io::{AsyncRead, AsyncWrite}; 3 | 4 | pub trait Io: AsyncRead + AsyncWrite + Unpin + Send + 'static + Debug {} 5 | 6 | impl Io for T where T: AsyncRead + AsyncWrite + Unpin + Send + 'static + Debug {} 7 | -------------------------------------------------------------------------------- /core/src/core/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod acceptor; 2 | pub mod connector; 3 | pub mod endpoint; 4 | pub mod io; 5 | pub mod quic; 6 | pub mod resolver; 7 | pub mod simplex; 8 | -------------------------------------------------------------------------------- /core/src/core/quic/client.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::{ 4 | core::{endpoint::Endpoint, resolver::Resolver}, 5 | Result, 6 | }; 7 | use anyhow::bail; 8 | use futures::{future::select_ok, FutureExt}; 9 | use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Connection, Endpoint as QuicEndpoint}; 10 | use rustls_platform_verifier::ConfigVerifierExt; 11 | 12 | pub async fn create_quic_connection( 13 | server: Endpoint, 14 | resolver: R, 15 | alpn_protocols: Vec>, 16 | ) -> Result { 17 | match server { 18 | // It's too unlikely there will be a ip cert, plus we need to verify if 19 | // ring will correctly validate this. 
20 | Endpoint::Addr(addr) => bail!( 21 | "Cannot connect to remote with ip {}, domain is required for certificate validation", 22 | addr 23 | ), 24 | Endpoint::Domain(host, port) => { 25 | let addrs = resolver.lookup_ip(&host).await?; 26 | let host_ref = &host; 27 | 28 | let crypto_config = { 29 | let mut config = rustls::ClientConfig::with_platform_verifier(); 30 | config.alpn_protocols = alpn_protocols; 31 | Arc::new(QuicClientConfig::try_from(config)?) 32 | }; 33 | 34 | let connection = select_ok(addrs.into_iter().map(|addr| { 35 | let config = ClientConfig::new(crypto_config.clone()); 36 | 37 | async move { 38 | match addr { 39 | std::net::IpAddr::V4(addr_v4) => Ok::<_, anyhow::Error>( 40 | QuicEndpoint::client((std::net::Ipv4Addr::UNSPECIFIED, 0).into())? 41 | .connect_with(config, (addr_v4, port).into(), host_ref)? 42 | .await?, 43 | ), 44 | std::net::IpAddr::V6(addr_v6) => Ok(QuicEndpoint::client( 45 | (std::net::Ipv6Addr::UNSPECIFIED, 0).into(), 46 | )? 47 | .connect_with(config, (addr_v6, port).into(), host_ref)? 48 | .await?), 49 | } 50 | } 51 | .boxed() 52 | })) 53 | .await? 
54 | .0; 55 | 56 | Ok(connection) 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /core/src/core/quic/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | 3 | use quinn::{RecvStream, SendStream}; 4 | use tokio::io::{AsyncRead, AsyncWrite}; 5 | 6 | #[derive(Debug)] 7 | #[pin_project::pin_project] 8 | pub struct QuicStream { 9 | #[pin] 10 | send: SendStream, 11 | #[pin] 12 | recv: RecvStream, 13 | } 14 | 15 | impl QuicStream { 16 | pub fn new(send: SendStream, recv: RecvStream) -> Self { 17 | Self { send, recv } 18 | } 19 | } 20 | 21 | impl AsyncRead for QuicStream { 22 | fn poll_read( 23 | self: std::pin::Pin<&mut Self>, 24 | cx: &mut std::task::Context<'_>, 25 | buf: &mut tokio::io::ReadBuf<'_>, 26 | ) -> std::task::Poll> { 27 | self.project().recv.poll_read(cx, buf) 28 | } 29 | } 30 | 31 | impl AsyncWrite for QuicStream { 32 | fn poll_write( 33 | self: std::pin::Pin<&mut Self>, 34 | cx: &mut std::task::Context<'_>, 35 | buf: &[u8], 36 | ) -> std::task::Poll> { 37 | self.project().send.poll_write(cx, buf).map_err(Into::into) 38 | } 39 | 40 | fn poll_flush( 41 | self: std::pin::Pin<&mut Self>, 42 | cx: &mut std::task::Context<'_>, 43 | ) -> std::task::Poll> { 44 | self.project().send.poll_flush(cx) 45 | } 46 | 47 | fn poll_shutdown( 48 | self: std::pin::Pin<&mut Self>, 49 | cx: &mut std::task::Context<'_>, 50 | ) -> std::task::Poll> { 51 | self.project().send.poll_shutdown(cx) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /core/src/core/resolver/hickory.rs: -------------------------------------------------------------------------------- 1 | use super::Resolver; 2 | use crate::Result; 3 | use anyhow::bail; 4 | use hickory_proto::op::{Message, MessageType}; 5 | use hickory_resolver::{ 6 | config::{NameServerConfig, ResolverConfig, ResolverOpts}, 7 | name_server::TokioConnectionProvider, 8 | 
TokioResolver, 9 | }; 10 | use std::{ 11 | net::{IpAddr, Ipv4Addr, Ipv6Addr}, 12 | time::Duration, 13 | }; 14 | 15 | #[derive(Debug)] 16 | pub struct HickoryResolver { 17 | client: TokioResolver, 18 | } 19 | 20 | impl HickoryResolver { 21 | pub fn new(nameservers: Vec, timeout: Duration) -> Result { 22 | let mut options = ResolverOpts::default(); 23 | options.timeout = timeout; 24 | 25 | let mut config = ResolverConfig::default(); 26 | for nameserver in nameservers { 27 | config.add_name_server(nameserver); 28 | } 29 | 30 | Ok(Self { 31 | client: TokioResolver::builder_with_config(config, TokioConnectionProvider::default()) 32 | .with_options(options) 33 | .build(), 34 | }) 35 | } 36 | } 37 | 38 | #[async_trait::async_trait] 39 | impl Resolver for HickoryResolver { 40 | async fn lookup_ip(&self, name: &str) -> Result> { 41 | Ok(self.client.lookup_ip(name).await?.into_iter().collect()).and_then(|r: Vec| { 42 | if r.is_empty() { 43 | bail!("Failed to find result for domain {}", name) 44 | } else { 45 | Ok(r) 46 | } 47 | }) 48 | } 49 | 50 | async fn lookup_ipv4(&self, name: &str) -> Result> { 51 | Ok(self 52 | .client 53 | .ipv4_lookup(name) 54 | .await? 55 | .into_iter() 56 | .map(Into::into) 57 | .collect()) 58 | .and_then(|r: Vec| { 59 | if r.is_empty() { 60 | bail!("Failed to find result for domain {}", name) 61 | } else { 62 | Ok(r) 63 | } 64 | }) 65 | } 66 | 67 | async fn lookup_ipv6(&self, name: &str) -> Result> { 68 | Ok(self 69 | .client 70 | .ipv6_lookup(name) 71 | .await? 
72 | .into_iter() 73 | .map(Into::into) 74 | .collect()) 75 | .and_then(|r: Vec| { 76 | if r.is_empty() { 77 | bail!("Failed to find result for domain {}", name) 78 | } else { 79 | Ok(r) 80 | } 81 | }) 82 | } 83 | 84 | async fn lookup_raw(&self, mut message: Message) -> Result { 85 | let query = message 86 | .queries() 87 | .first() 88 | .to_owned() 89 | .ok_or_else(|| anyhow::anyhow!("Receive DNS request with no query item"))?; 90 | 91 | let result = self 92 | .client 93 | .lookup(query.name().clone(), query.query_type()) 94 | .await?; 95 | 96 | message 97 | .add_answers(result.record_iter().cloned()) 98 | .set_message_type(MessageType::Response); 99 | 100 | Ok(message) 101 | } 102 | } 103 | 104 | #[cfg(test)] 105 | mod tests { 106 | use super::*; 107 | use hickory_proto::xfer::Protocol; 108 | use hickory_proto::{ 109 | op::{MessageType, OpCode, Query}, 110 | rr::RecordType, 111 | }; 112 | use hickory_resolver::Name; 113 | use std::str::FromStr; 114 | 115 | #[tokio::test] 116 | async fn resolve() -> Result<()> { 117 | let resolver = HickoryResolver::new( 118 | vec![NameServerConfig { 119 | socket_addr: "8.8.8.8:53".parse().unwrap(), 120 | protocol: Protocol::Udp, 121 | tls_dns_name: None, 122 | http_endpoint: None, 123 | trust_negative_responses: true, 124 | bind_addr: None, 125 | }], 126 | Duration::from_secs(5), 127 | )?; 128 | 129 | assert!(!resolver.lookup_ip("apple.com").await?.is_empty()); 130 | assert!(!resolver.lookup_ipv4("apple.com").await?.is_empty()); 131 | assert!(!resolver.lookup_ipv6("facebook.com").await?.is_empty()); 132 | 133 | let mut message = Message::new(); 134 | message.set_op_code(OpCode::Query); 135 | message.set_message_type(MessageType::Query); 136 | let query = Query::query(Name::from_str("apple.com").unwrap(), RecordType::A); 137 | message.add_query(query); 138 | assert!(!resolver.lookup_raw(message).await?.answers().is_empty()); 139 | 140 | let mut message = Message::new(); 141 | message.set_op_code(OpCode::Query); 142 | 
message.set_message_type(MessageType::Query); 143 | let query = Query::query(Name::from_str("gmail.com").unwrap(), RecordType::MX); 144 | message.add_query(query); 145 | assert!(!resolver.lookup_raw(message).await?.answers().is_empty()); 146 | 147 | Ok(()) 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /core/src/core/resolver/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod hickory; 2 | pub mod system; 3 | 4 | use crate::Result; 5 | use anyhow::bail; 6 | use hickory_proto::op::Message; 7 | use std::{ 8 | fmt::Debug, 9 | net::{IpAddr, Ipv4Addr, Ipv6Addr}, 10 | vec::Vec, 11 | }; 12 | 13 | #[async_trait::async_trait] 14 | #[auto_impl::auto_impl(Arc)] 15 | pub trait Resolver: Debug + Send { 16 | async fn lookup_ip(&self, name: &str) -> Result>; 17 | async fn lookup_ipv4(&self, name: &str) -> Result>; 18 | async fn lookup_ipv6(&self, name: &str) -> Result>; 19 | async fn lookup_raw(&self, _message: Message) -> Result { 20 | bail!("Not implemented") 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /core/src/core/resolver/system.rs: -------------------------------------------------------------------------------- 1 | use super::Resolver; 2 | use crate::Result; 3 | use dns_lookup::{getaddrinfo, lookup_host, AddrFamily, AddrInfoHints, SockType}; 4 | use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; 5 | 6 | #[derive(Debug, Default)] 7 | pub struct SystemResolver {} 8 | 9 | #[async_trait::async_trait] 10 | impl Resolver for SystemResolver { 11 | async fn lookup_ip(&self, name: &str) -> Result> { 12 | let name = name.to_owned(); 13 | Ok(tokio::task::spawn_blocking(move || lookup_host(&name)).await??) 14 | } 15 | 16 | async fn lookup_ipv4(&self, name: &str) -> Result> { 17 | // We won't error out if we see an ipv6 address. 18 | Ok(self 19 | .lookup(name, AddrFamily::Inet) 20 | .await? 
21 | .into_iter() 22 | .filter_map(|ip| match ip { 23 | IpAddr::V4(ip_) => Some(ip_), 24 | IpAddr::V6(_) => None, 25 | }) 26 | .collect::>()) 27 | } 28 | 29 | async fn lookup_ipv6(&self, name: &str) -> Result> { 30 | Ok(self 31 | .lookup(name, AddrFamily::Inet6) 32 | .await? 33 | .into_iter() 34 | .filter_map(|ip| match ip { 35 | IpAddr::V4(_) => None, 36 | IpAddr::V6(ip_) => Some(ip_), 37 | }) 38 | .collect::>()) 39 | } 40 | } 41 | 42 | impl SystemResolver { 43 | pub fn new() -> Self { 44 | Self {} 45 | } 46 | 47 | async fn lookup(&self, name: &str, family: AddrFamily) -> Result> { 48 | let hints = AddrInfoHints { 49 | socktype: SockType::Stream.into(), 50 | address: family.into(), 51 | ..AddrInfoHints::default() 52 | }; 53 | 54 | let name = name.to_owned(); 55 | Ok( 56 | tokio::task::spawn_blocking(move || getaddrinfo(Some(&name), None, Some(hints))) 57 | .await? 58 | .map_err(Into::::into)? 59 | .filter_map(|r| r.ok()) 60 | .map(|r| r.sockaddr.ip()) 61 | .collect(), 62 | ) 63 | } 64 | } 65 | 66 | #[cfg(test)] 67 | mod tests { 68 | use super::*; 69 | use rstest::*; 70 | 71 | #[rstest] 72 | #[case("localhost")] 73 | #[case("google.com")] 74 | #[tokio::test] 75 | async fn test_look_up_existing_domain(#[case] host: &str) { 76 | let resolver = SystemResolver::new(); 77 | 78 | let result = resolver.lookup_ip(host).await.unwrap(); 79 | assert!(!result.is_empty()); 80 | } 81 | 82 | #[rstest] 83 | #[case("t.test")] 84 | #[case("t.invalid")] 85 | #[tokio::test] 86 | async fn test_look_up_nonexisting_domain(#[case] host: &str) { 87 | let resolver = SystemResolver::new(); 88 | 89 | assert!(resolver.lookup_ip(host).await.is_err()); 90 | } 91 | 92 | #[rstest] 93 | #[case("localhost", Some("127.0.0.1"))] 94 | #[case("google.com", None)] 95 | #[tokio::test] 96 | async fn test_look_up_a_record(#[case] host: &str, #[case] expected: Option<&str>) { 97 | let resolver = SystemResolver::new(); 98 | 99 | let result = resolver.lookup_ipv4(host).await.unwrap(); 100 | 
assert!(!result.is_empty()); 101 | 102 | if let Some(expect) = expected { 103 | assert!(result 104 | .into_iter() 105 | .any(|x| x == expect.parse::().unwrap())); 106 | } 107 | } 108 | 109 | #[rstest] 110 | #[case("t.test")] 111 | #[case("t.invalid")] 112 | #[tokio::test] 113 | async fn test_look_up_nonexisting_domain_for_a_record(#[case] host: &str) { 114 | let resolver = SystemResolver::new(); 115 | 116 | assert!(resolver.lookup_ipv4(host).await.is_err()); 117 | } 118 | 119 | // Surprisingly, we cannot use getaddrinfo to do AAAA query on Windows! And 120 | // it's not documented. But we can find similar issue here 121 | // https://stackoverflow.com/questions/66755681/getaddrinfo-c-on-windows-not-handling-ipv6-correctly-returning-error-code-1 122 | // and it seems there is no fix. An article suggests setting up Teredo would 123 | // fix this on Windows 7. 124 | // http://netscantools.blogspot.co.uk/2011/06/ipv6-teredo-problems-and-solutions-on.html 125 | // Obviously it not a solution but a bug Microsoft probably not going to 126 | // fix. 127 | // 128 | // But it should still be ok as long as IPv4 is still available, just we 129 | // won't be able to use IPv6 on Windows. 130 | // 131 | // But I'm not using Windows, so it's fine for me and I won't try to fix it 132 | // anymore. 133 | // 134 | // Anyway, PR is always welcomed. 
135 | #[cfg(not(target_os = "windows"))] 136 | #[rstest] 137 | #[case("localhost", Some("::1"))] 138 | #[case("google.com", None)] 139 | #[tokio::test] 140 | async fn test_look_up_aaaa_record(#[case] host: &str, #[case] expected: Option<&str>) { 141 | let resolver = SystemResolver::new(); 142 | 143 | let result = resolver.lookup_ipv6(host).await.unwrap(); 144 | assert!(!result.is_empty()); 145 | 146 | if let Some(expect) = expected { 147 | assert!(result 148 | .into_iter() 149 | .any(|x| x == expect.parse::().unwrap())); 150 | } 151 | } 152 | 153 | #[rstest] 154 | #[case("t.test")] 155 | #[case("t.invalid")] 156 | #[tokio::test] 157 | async fn test_look_up_nonexisting_domain_for_aaaa_record(#[case] host: &str) { 158 | let resolver = SystemResolver::new(); 159 | 160 | assert!(resolver.lookup_ipv6(host).await.is_err()); 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /core/src/core/simplex/client.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use crate::{ 4 | core::{ 5 | endpoint::Endpoint, 6 | io::Io, 7 | simplex::{io::into_io, Config, ENDPOINT_HEADER_KEY}, 8 | }, 9 | Result, 10 | }; 11 | use anyhow::Context; 12 | use http::HeaderName; 13 | use tokio_tungstenite::client_async; 14 | use tungstenite::client::IntoClientRequest; 15 | 16 | pub async fn connect(io: I, endpoint: &Endpoint, config: &Config) -> Result { 17 | let uri = http::uri::Builder::new() 18 | .authority(config.host.clone()) 19 | .scheme("ws") 20 | .path_and_query(&config.path) 21 | .build() 22 | .with_context(|| { 23 | format!( 24 | "Failed to create simplex request URI connecting with server: {} and path: {}", 25 | &config.host, &config.path 26 | ) 27 | })?; 28 | 29 | let mut request = uri.into_client_request()?; 30 | request.headers_mut().insert( 31 | HeaderName::from_str(&config.secret_header.0)?, 32 | config.secret_header.1.parse()?, 33 | ); 34 | 35 | request 36 | 
.headers_mut() 37 | .insert(ENDPOINT_HEADER_KEY, endpoint.to_string().parse()?); 38 | 39 | let (stream, _response) = client_async(request, io) 40 | .await 41 | .context("Websocket handshaked failed when establishing simplex connection")?; 42 | 43 | Ok(into_io(stream)) 44 | } 45 | -------------------------------------------------------------------------------- /core/src/core/simplex/io.rs: -------------------------------------------------------------------------------- 1 | use crate::core::io::Io; 2 | use bytes::{Buf, Bytes}; 3 | use futures::{stream::TryStreamExt, task::AtomicWaker, SinkExt}; 4 | use std::task::Poll; 5 | use tokio::io::{AsyncBufRead, AsyncRead}; 6 | use tokio_tungstenite::{ 7 | tungstenite::{error::Error as WsError, Message}, 8 | WebSocketStream, 9 | }; 10 | 11 | lazy_static::lazy_static! { 12 | static ref EOF_MESSAGE: Message = Message::Text("EOF".into()); 13 | } 14 | 15 | pub fn into_io(stream: WebSocketStream) -> impl Io { 16 | WebSocketStreamToAsyncWrite::new(stream) 17 | } 18 | 19 | #[derive(Debug)] 20 | #[pin_project::pin_project] 21 | pub struct WebSocketStreamToAsyncWrite { 22 | stream: WebSocketStream, 23 | read_closed: bool, 24 | write_closed: bool, 25 | waker: AtomicWaker, 26 | chunk: Option, 27 | } 28 | 29 | impl WebSocketStreamToAsyncWrite { 30 | pub fn new(stream: WebSocketStream) -> Self { 31 | Self { 32 | stream, 33 | read_closed: false, 34 | write_closed: false, 35 | waker: AtomicWaker::default(), 36 | chunk: None, 37 | } 38 | } 39 | } 40 | 41 | fn ws_to_io_error(error: WsError) -> std::io::Error { 42 | std::io::Error::other(error) 43 | } 44 | 45 | fn is_eof(message: &Message) -> bool { 46 | message == &*EOF_MESSAGE 47 | } 48 | 49 | // Here we implement futures AsyncWrite and then use Compat to support tokio's. 
50 | impl tokio::io::AsyncWrite for WebSocketStreamToAsyncWrite { 51 | fn poll_write( 52 | self: std::pin::Pin<&mut Self>, 53 | cx: &mut std::task::Context<'_>, 54 | buf: &[u8], 55 | ) -> std::task::Poll> { 56 | let stream = self.project().stream; 57 | 58 | let result = futures::ready!(stream.poll_ready_unpin(cx)); 59 | 60 | // TODO: There could be a better way to handle this, we are making copies. 61 | std::task::Poll::Ready( 62 | result 63 | .and_then(|_| { 64 | stream 65 | .start_send_unpin(Message::Binary(buf.to_owned().into())) 66 | .map(|_| buf.len()) 67 | }) 68 | .map_err(ws_to_io_error), 69 | ) 70 | } 71 | 72 | fn poll_flush( 73 | self: std::pin::Pin<&mut Self>, 74 | cx: &mut std::task::Context<'_>, 75 | ) -> std::task::Poll> { 76 | let stream = self.project().stream; 77 | 78 | let result = futures::ready!(stream.poll_flush_unpin(cx)); 79 | 80 | Poll::Ready(result.map_err(ws_to_io_error)) 81 | } 82 | 83 | fn poll_shutdown( 84 | self: std::pin::Pin<&mut Self>, 85 | cx: &mut std::task::Context<'_>, 86 | ) -> Poll> { 87 | // We know now write is done. 88 | // 89 | // However, if we trigger close right now, the ws implementation would 90 | // send a close frame and the other side would immediately stop sending 91 | // any new data (other than the ones already queued). 92 | // 93 | // In order to avoid that, we need to implement our own way of sending 94 | // EOF. 95 | 96 | let this = self.project(); 97 | let stream = this.stream; 98 | 99 | if !*this.write_closed { 100 | // Write is not closed, so we need to send EOF first. 101 | let result = futures::ready!(stream.poll_ready_unpin(cx)); 102 | let result = result 103 | .and_then(|_| stream.start_send_unpin(EOF_MESSAGE.clone())) 104 | .map_err(ws_to_io_error); 105 | 106 | if let Err(e) = result { 107 | return Poll::Ready(Err(e)); 108 | } 109 | 110 | // We send it successfully, mark the write as closed. 
111 | *this.write_closed = true; 112 | } 113 | 114 | if *this.read_closed { 115 | // We can close the connection now. 116 | let result = futures::ready!(stream.poll_close_unpin(cx)); 117 | 118 | Poll::Ready(result.map_err(ws_to_io_error)) 119 | } else { 120 | // Wait for the read side to receive EOF. 121 | this.waker.register(cx.waker()); 122 | Poll::Pending 123 | } 124 | } 125 | } 126 | 127 | impl AsyncBufRead for WebSocketStreamToAsyncWrite { 128 | fn poll_fill_buf( 129 | self: std::pin::Pin<&mut Self>, 130 | cx: &mut std::task::Context<'_>, 131 | ) -> Poll> { 132 | let this = self.project(); 133 | let stream = this.stream; 134 | 135 | if this.chunk.is_none() { 136 | let message = futures::ready!(stream.try_poll_next_unpin(cx)); 137 | 138 | match message { 139 | Some(m) => match m { 140 | Ok(m) => { 141 | if is_eof(&m) { 142 | *this.read_closed = true; 143 | if *this.write_closed { 144 | this.waker.wake(); 145 | } 146 | Poll::Ready(Ok(&[])) 147 | } else { 148 | *this.chunk = Some(m.into_data()); 149 | let chunk = this.chunk.as_ref().unwrap(); 150 | Poll::Ready(Ok(chunk)) 151 | } 152 | } 153 | Err(err) => Poll::Ready(Err(ws_to_io_error(err))), 154 | }, 155 | None => { 156 | if !*this.read_closed { 157 | // Somehow real EOF came before EOF command 158 | *this.read_closed = true; 159 | if *this.write_closed { 160 | this.waker.wake(); 161 | } 162 | } 163 | 164 | Poll::Ready(Ok(&[])) 165 | } 166 | } 167 | } else { 168 | let chunk = this.chunk.as_ref().unwrap(); 169 | Poll::Ready(Ok(chunk)) 170 | } 171 | } 172 | 173 | fn consume(self: std::pin::Pin<&mut Self>, amt: usize) { 174 | let chunk = self.project().chunk; 175 | 176 | if amt > 0 { 177 | chunk.as_mut().expect("No check present").advance(amt); 178 | 179 | if chunk.as_ref().unwrap().is_empty() { 180 | *chunk = None; 181 | } 182 | } 183 | } 184 | } 185 | 186 | impl AsyncRead for WebSocketStreamToAsyncWrite { 187 | fn poll_read( 188 | mut self: std::pin::Pin<&mut Self>, 189 | cx: &mut std::task::Context<'_>, 190 | 
buf: &mut tokio::io::ReadBuf<'_>, 191 | ) -> Poll> { 192 | if buf.remaining() == 0 { 193 | return Poll::Ready(Ok(())); 194 | } 195 | 196 | let inner_buf = match self.as_mut().poll_fill_buf(cx) { 197 | Poll::Ready(Ok(buf)) => buf, 198 | Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), 199 | Poll::Pending => return Poll::Pending, 200 | }; 201 | let len = std::cmp::min(inner_buf.len(), buf.remaining()); 202 | buf.put_slice(&inner_buf[..len]); 203 | 204 | self.consume(len); 205 | Poll::Ready(Ok(())) 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /core/src/core/simplex/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod io; 3 | pub mod server; 4 | 5 | static ENDPOINT_HEADER_KEY: &str = "Simplex-Endpoint"; 6 | 7 | #[derive(Debug, Clone)] 8 | pub struct Config { 9 | host: String, 10 | path: String, 11 | secret_header: (String, String), 12 | } 13 | 14 | impl Config { 15 | pub fn new(host: String, path: String, secret_header: (String, String)) -> Self { 16 | Self { 17 | host, 18 | path, 19 | secret_header, 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /core/src/core/simplex/server.rs: -------------------------------------------------------------------------------- 1 | use super::{io::into_io, Config, ENDPOINT_HEADER_KEY}; 2 | use crate::{ 3 | core::{endpoint::Endpoint, io::Io}, 4 | Result, 5 | }; 6 | use anyhow::{anyhow, bail, ensure, Context}; 7 | use bytes::{Buf, Bytes}; 8 | use chrono::Utc; 9 | use futures::{Future, FutureExt}; 10 | use http_body_util::Full; 11 | use hyper::{body::Incoming, server::conn::http1::Builder, service::service_fn, Request, Response}; 12 | use hyper_tungstenite::{ 13 | is_upgrade_request, 14 | tungstenite::{error::ProtocolError, handshake::derive_accept_key, protocol::Role}, 15 | WebSocketStream, 16 | }; 17 | use hyper_util::rt::TokioIo; 18 | use 
std::sync::Arc; 19 | use tokio::{ 20 | io::{AsyncRead, AsyncWrite}, 21 | sync::{ 22 | oneshot::{channel, Receiver, Sender}, 23 | Mutex, 24 | }, 25 | }; 26 | use tracing::info; 27 | 28 | async fn hide_error_handler( 29 | request: Request, 30 | config: Config, 31 | signal: Arc>>, 32 | ) -> Result>> { 33 | let result = handler(request, config, signal).await; 34 | 35 | match result { 36 | Ok(response) => Ok(response), 37 | Err(err) => { 38 | info!( 39 | "Failed to process incoming simplex request. \ 40 | It's most likely the client is not a \ 41 | valid simplex client or the configuration is wrong. \ 42 | Hiding the error for security reasons. Error: {}", 43 | err 44 | ); 45 | 46 | Ok(Response::new(Full::new(Bytes::from(format!( 47 | "Now is {}", 48 | Utc::now().to_rfc3339() 49 | ))))) 50 | } 51 | } 52 | } 53 | 54 | async fn handler( 55 | request: Request, 56 | config: Config, 57 | signal: Arc>>, 58 | ) -> Result>> { 59 | // Check if the request is requesting the right path 60 | ensure!( 61 | request.uri().path() == config.path, 62 | "Got a simplex request to wrong path: {}", 63 | request.uri().path() 64 | ); 65 | 66 | ensure!( 67 | request 68 | .headers() 69 | .get(config.secret_header.0.as_str()) 70 | .and_then(|v| v.to_str().ok()) 71 | == Some(config.secret_header.1.as_str()), 72 | "Got a simplex request with wrong secret header value." 
73 | ); 74 | 75 | ensure!( 76 | is_upgrade_request(&request), 77 | "Got a non upgrade request when simplex request is expected" 78 | ); 79 | 80 | let endpoint = request 81 | .headers() 82 | .get(ENDPOINT_HEADER_KEY) 83 | .and_then(|ep| ep.to_str().ok()) 84 | .and_then(|ep| ep.parse().ok()) 85 | .ok_or_else(|| anyhow!("Failed to find valid target endpoint from simplex request"))?; 86 | 87 | let upgrade_signal = signal 88 | .lock() 89 | .await 90 | .take() 91 | .expect("there should be only one upgrade request for one connection"); 92 | 93 | upgrade_signal 94 | .endpoint_tx 95 | .send(endpoint) 96 | .expect("the other side should not be released"); 97 | 98 | upgrade_signal 99 | .done_rx 100 | .await 101 | .expect("the done signal should be sent before polling the connection"); 102 | 103 | let response = 104 | upgrade_response(&request).context("Failed to create websocket upgrade response")?; 105 | 106 | Ok(response) 107 | } 108 | 109 | // From hyper_tungstenite 110 | fn upgrade_response(request: &Request) -> Result>> { 111 | let key = request 112 | .headers() 113 | .get("Sec-WebSocket-Key") 114 | .ok_or(ProtocolError::MissingSecWebSocketKey)?; 115 | 116 | if request 117 | .headers() 118 | .get("Sec-WebSocket-Version") 119 | .map(|v| v.as_bytes()) 120 | != Some(b"13") 121 | { 122 | return Err(ProtocolError::MissingSecWebSocketVersionHeader.into()); 123 | } 124 | 125 | Ok(Response::builder() 126 | .status(hyper::StatusCode::SWITCHING_PROTOCOLS) 127 | .header(hyper::header::CONNECTION, "upgrade") 128 | .header(hyper::header::UPGRADE, "websocket") 129 | .header("Sec-WebSocket-Accept", &derive_accept_key(key.as_bytes())) 130 | .body(Full::new(Bytes::from("switching to websocket protocol"))) 131 | .expect("bug: failed to build response")) 132 | } 133 | 134 | struct UpgradeSignal { 135 | endpoint_tx: Sender, 136 | done_rx: Receiver<()>, 137 | } 138 | 139 | pub async fn handshake( 140 | io: impl Io, 141 | config: Config, 142 | ) -> Result<(Endpoint, impl Future>)> { 143 | 
let (done_tx, done_rx) = channel(); 144 | let (endpoint_tx, endpoint_rx) = channel(); 145 | 146 | let signal = Arc::new(Mutex::new(Some(UpgradeSignal { 147 | endpoint_tx, 148 | done_rx, 149 | }))); 150 | 151 | let conn = Builder::new().serve_connection( 152 | TokioIo::new(io), 153 | service_fn(move |req| { 154 | let config = config.clone(); 155 | let signal = signal.clone(); 156 | // We need to pin the future here so the `conn` is `Unpin`able. 157 | hide_error_handler(req, config, signal).boxed() 158 | }), 159 | ); 160 | 161 | let mut conn_fut = conn.without_shutdown(); 162 | 163 | let endpoint = tokio::select! { 164 | _ = &mut conn_fut => { 165 | // No upgrade happens. The client isn't a 166 | // simplex client; 167 | bail!("The client is not a valid simplex client"); 168 | } 169 | result = endpoint_rx => { 170 | match result { 171 | Ok(endpoint) => endpoint, 172 | Err(_) => unreachable!(), 173 | } 174 | } 175 | }; 176 | 177 | info!("Got connection request to {}", endpoint); 178 | 179 | Ok((endpoint, async move { 180 | // This should never error since we are not polling the other side, so 181 | // the receiver should not be deallocated. 
182 | done_tx 183 | .send(()) 184 | .expect("bug: the done signal receiver should not be deallocated"); 185 | let part = conn_fut.await?; 186 | 187 | let ws_stream = WebSocketStream::from_raw_socket( 188 | ChainReadBufAndIo { 189 | read_buf: part.read_buf, 190 | io: part.io.into_inner(), 191 | }, 192 | Role::Server, 193 | None, 194 | ) 195 | .await; 196 | 197 | Ok(into_io(ws_stream)) 198 | })) 199 | } 200 | 201 | #[derive(Debug)] 202 | #[pin_project::pin_project] 203 | struct ChainReadBufAndIo { 204 | // TODO: Make this Option<> 205 | read_buf: Bytes, 206 | #[pin] 207 | io: I, 208 | } 209 | 210 | impl AsyncRead for ChainReadBufAndIo { 211 | fn poll_read( 212 | self: std::pin::Pin<&mut Self>, 213 | cx: &mut std::task::Context<'_>, 214 | buf: &mut tokio::io::ReadBuf<'_>, 215 | ) -> std::task::Poll> { 216 | let this = self.project(); 217 | 218 | if !this.read_buf.is_empty() { 219 | let len = this.read_buf.len().min(buf.remaining()); 220 | buf.put_slice(&this.read_buf.slice(0..len)); 221 | this.read_buf.advance(len); 222 | return std::task::Poll::Ready(Ok(())); 223 | } 224 | 225 | this.io.poll_read(cx, buf) 226 | } 227 | } 228 | 229 | impl AsyncWrite for ChainReadBufAndIo { 230 | fn poll_write( 231 | self: std::pin::Pin<&mut Self>, 232 | cx: &mut std::task::Context<'_>, 233 | buf: &[u8], 234 | ) -> std::task::Poll> { 235 | self.project().io.poll_write(cx, buf) 236 | } 237 | 238 | fn poll_flush( 239 | self: std::pin::Pin<&mut Self>, 240 | cx: &mut std::task::Context<'_>, 241 | ) -> std::task::Poll> { 242 | self.project().io.poll_flush(cx) 243 | } 244 | 245 | fn poll_shutdown( 246 | self: std::pin::Pin<&mut Self>, 247 | cx: &mut std::task::Context<'_>, 248 | ) -> std::task::Poll> { 249 | self.project().io.poll_shutdown(cx) 250 | } 251 | } 252 | -------------------------------------------------------------------------------- /core/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub use anyhow::{Error, Result}; 2 | 3 | pub 
mod config;
/// Core proxy runtime: acceptors, connectors, resolvers, and transports.
pub mod core;
--------------------------------------------------------------------------------
/snapcraft.yaml:
--------------------------------------------------------------------------------
# Snap package definition for the dandelion proxy daemon.
name: dandelion
version: git
summary: A fully programmable proxy
description: |
  A proxy where the rule can be defined with dynamic language for extreme flexibility

grade: stable
base: core22
confinement: strict

parts:
  dandelion:
    plugin: rust
    rust-channel: stable
    # The Rust crate lives in the `core/` subdirectory of the repository.
    source: core/
    build-packages:
      - pkg-config
      - libssl-dev

apps:
  dandelion:
    command: bin/dandelion
    daemon: simple
    plugs:
      - network
      - network-bind