├── .cargo └── config.toml ├── .github └── workflows │ ├── ci.yml │ └── release.yml ├── .gitignore ├── .vscode ├── launch.json ├── settings.json └── tasks.json ├── Cargo.lock ├── Cargo.toml ├── Cross.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.adoc ├── add-tag.sh ├── bench ├── docker-compose.yml ├── http-server │ ├── Dockerfile │ ├── create-large-file.sh │ └── main.py ├── seeker │ └── config.yml ├── shadowsocks │ └── resolv.conf └── ss-server │ └── resolv.conf ├── ci ├── cargo-out-dir ├── macos-install-packages └── ubuntu-install-packages ├── config ├── Cargo.toml └── src │ ├── lib.rs │ ├── rule.rs │ └── server_config.rs ├── crypto ├── Cargo.toml └── src │ ├── aead.rs │ ├── cipher.rs │ ├── digest.rs │ ├── dummy.rs │ ├── lib.rs │ ├── openssl.rs │ ├── ring.rs │ ├── siv.rs │ ├── sodium.rs │ ├── stream.rs │ └── table.rs ├── dnsserver ├── Cargo.toml └── src │ ├── lib.rs │ └── resolver.rs ├── hermesdns ├── Cargo.toml └── src │ ├── dns │ ├── authority.rs │ ├── buffer.rs │ ├── cache.rs │ ├── client.rs │ ├── context.rs │ ├── mod.rs │ ├── protocol.rs │ ├── resolve.rs │ └── server.rs │ ├── hosts.rs │ └── lib.rs ├── http_proxy_client ├── Cargo.toml └── src │ ├── http.rs │ ├── https.rs │ └── lib.rs ├── sample_config.yml ├── seeker ├── Cargo.toml └── src │ ├── config_encryptor.rs │ ├── config_watcher.rs │ ├── dns_client.rs │ ├── group_servers_chooser.rs │ ├── logger.rs │ ├── macros.rs │ ├── main.rs │ ├── probe_connectivity.rs │ ├── proxy_client.rs │ ├── proxy_connection.rs │ ├── proxy_tcp_stream.rs │ ├── proxy_udp_socket.rs │ ├── relay_tcp_stream.rs │ ├── relay_udp_socket.rs │ ├── server_chooser.rs │ ├── server_performance.rs │ └── traffic.rs ├── socks5_client ├── Cargo.toml ├── examples │ └── udp_echo.rs └── src │ ├── lib.rs │ ├── tcp.rs │ ├── types.rs │ └── udp.rs ├── ssclient ├── Cargo.toml └── src │ ├── lib.rs │ ├── tcp_io.rs │ ├── tcp_io │ ├── aead.rs │ └── stream.rs │ ├── udp_io.rs │ └── udp_io │ └── crypto_io.rs ├── store ├── Cargo.toml └── src │ ├── config.rs │ ├── connections.rs │ ├── dns.rs │ └── lib.rs ├── sysconfig ├── Cargo.toml └── src │ ├── command.rs │ ├── iptables.rs │ ├── lib.rs │ ├── net │ ├── darwin.rs │ ├── linux.rs │ └── mod.rs │ ├── proc │ ├── darwin.rs │ ├── linux.rs │ └── mod.rs │ └── ulimit.rs ├── tcp_connection ├── Cargo.toml └── src │ ├── lib.rs │ ├── obfs_http.rs │ └── obfs_tls.rs └── tun_nat ├── Cargo.toml ├── examples └── echo.rs └── src ├── lib.rs └── tun_socket ├── mod.rs ├── tun_darwin.rs └── tun_linux.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.x86_64-unknown-linux-gnu] 2 | runner = 'sudo -E' 3 | 4 | [target.x86_64-apple-darwin] 5 | runner = 'sudo -E' 6 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | pull_request: 4 | push: 5 | branches: 6 | - master 7 | # schedule: 8 | # - cron: '00 01 * * *' 9 | 10 | concurrency: 11 | group: ${{ github.head_ref }} || ${{ github.ref }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | rustfmt: 16 | name: rustfmt 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Checkout repository 20 | uses: actions/checkout@v2 21 | 22 | - name: Install Rust 23 | uses: actions-rs/toolchain@v1 24 | with: 25 | toolchain: stable 26 | override: true 27 | profile: minimal 28 | components: rustfmt 29 | 30 | - name: Check formatting 31 | run: | 32 | cargo fmt --all -- --check 33 | 34 | - name: Check clippy 35 | 
run: | 36 | cargo clippy 37 | 38 | test: 39 | name: test 40 | needs: ["rustfmt"] 41 | env: 42 | # For some builds, we use cross to test on 32-bit and big-endian 43 | # systems. 44 | CARGO: cargo 45 | # When CARGO is set to CROSS, this is set to `--target matrix.target`. 46 | TARGET_FLAGS: 47 | # When CARGO is set to CROSS, TARGET_DIR includes matrix.target. 48 | TARGET_DIR: ./target 49 | # Emit backtraces on panics. 50 | RUST_BACKTRACE: 1 51 | OPENSSL_STATIC: yes 52 | DNS: 8.8.8.8 53 | runs-on: ${{ matrix.os }} 54 | strategy: 55 | matrix: 56 | include: 57 | - build: stable 58 | os: ubuntu-latest 59 | rust: stable 60 | - build: nightly 61 | os: ubuntu-latest 62 | rust: nightly 63 | - build: nightly-musl 64 | os: ubuntu-latest 65 | rust: nightly 66 | target: x86_64-unknown-linux-musl 67 | - build: macos 68 | os: macos-latest 69 | rust: nightly 70 | - build: nightly-armv7 71 | os: ubuntu-latest 72 | rust: nightly 73 | target: armv7-unknown-linux-musleabi 74 | - build: nightly-armv5 75 | os: ubuntu-latest 76 | rust: nightly 77 | target: armv5te-unknown-linux-musleabi 78 | # - build: beta 79 | # os: ubuntu-latest 80 | # rust: beta 81 | # - build: nightly-32 82 | # os: ubuntu-latest 83 | # rust: nightly 84 | # target: i686-unknown-linux-gnu 85 | # - build: nightly-mips 86 | # os: ubuntu-latest 87 | # rust: nightly 88 | # target: mips64-unknown-linux-gnuabi64 89 | # - build: win-msvc 90 | # os: windows-2019 91 | # rust: nightly 92 | # - build: win-gnu 93 | # os: windows-2019 94 | # rust: nightly-x86_64-gnu 95 | steps: 96 | - name: Checkout repository 97 | uses: actions/checkout@v2 98 | 99 | # - name: Setup upterm session 100 | # uses: lhotari/action-upterm@v1 101 | 102 | - name: Install packages (Ubuntu) 103 | if: matrix.os == 'ubuntu-latest' 104 | run: | 105 | # Disable TCP/UDP offload 106 | sudo ethtool -K eth0 tx off rx off 107 | 108 | sudo ci/ubuntu-install-packages 109 | 110 | - name: Install packages (macOS) 111 | if: matrix.os == 'macos-latest' 112 | run: | 113 | # Disable TCP/UDP offload 114 | sudo sysctl -w net.link.generic.system.hwcksum_tx=0 115 | sudo sysctl -w net.link.generic.system.hwcksum_rx=0 116 | 117 | sudo ci/macos-install-packages 118 | 119 | - name: Install Rust 120 | uses: actions-rs/toolchain@v1 121 | with: 122 | toolchain: ${{ matrix.rust }} 123 | profile: minimal 124 | override: true 125 | 126 | - name: Use Cross 127 | if: matrix.target != '' 128 | run: | 129 | cargo install cross --git https://github.com/cross-rs/cross 130 | echo "CARGO=cross" >> $GITHUB_ENV 131 | echo "TARGET_FLAGS=--target ${{ matrix.target }}" >> $GITHUB_ENV 132 | echo "TARGET_DIR=./target/${{ matrix.target }}" >> $GITHUB_ENV 133 | 134 | - name: Show command used for Cargo 135 | run: | 136 | echo "cargo command is: ${{ env.CARGO }}" 137 | echo "target flag is: ${{ env.TARGET_FLAGS }}" 138 | 139 | - name: Build cross docker images 140 | if: matrix.target != '' 141 | run: | 142 | [ -d ci/docker/${{ matrix.target }} ] && cd ci/docker/${{ matrix.target }} && ./build || true 143 | 144 | - name: Build all crates 145 | run: ${{ env.CARGO }} build --all ${{ env.TARGET_FLAGS }} 146 | 147 | # This is useful for debugging problems when the expected build artifacts 148 | # (like shell completions and man pages) aren't generated. 
149 | # - name: Show build.rs stderr 150 | # shell: bash 151 | # run: | 152 | # set +x 153 | # stderr="$(find "${{ env.TARGET_DIR }}/debug" -name stderr -print0 | xargs -0 ls -t | head -n1)" 154 | # if [ -s "$stderr" ]; then 155 | # echo "===== $stderr ===== " 156 | # cat "$stderr" 157 | # echo "=====" 158 | # fi 159 | # set -x 160 | 161 | - name: Run tests (without cross) 162 | if: matrix.target == '' 163 | run: ${{ env.CARGO }} test --all 164 | 165 | - name: Run tests (with cross) 166 | if: matrix.target != '' 167 | run: ${{ env.CARGO }} test --all ${{ env.TARGET_FLAGS }} 168 | # - name: Test for existence of build artifacts (Windows) 169 | # if: matrix.os == 'windows-2019' 170 | # shell: bash 171 | # run: | 172 | # outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")" 173 | # ls "$outdir/_rg.ps1" && file "$outdir/_rg.ps1" 174 | # - name: Test for existence of build artifacts (Unix) 175 | # if: matrix.os != 'windows-2019' 176 | # shell: bash 177 | # run: | 178 | # outdir="$(ci/cargo-out-dir "${{ env.TARGET_DIR }}")" 179 | # # TODO: Check for the man page generation here. For whatever reason, 180 | # # it seems to be intermittently failing in CI. No idea why. 181 | # # for f in rg.bash rg.fish rg.1; do 182 | # for f in rg.bash rg.fish; do 183 | # # We could use file -E here, but it isn't supported on macOS. 184 | # ls "$outdir/$f" && file "$outdir/$f" 185 | # done 186 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | config.json 4 | Procfile 5 | /dns.db 6 | /config.yml 7 | /log 8 | .idea 9 | .DS_Store 10 | /ci/docker/*/stage/ 11 | *.log 12 | *.log.* 13 | trace-*.json 14 | geoip.mmdb 15 | *.sqlite 16 | *.sqlite-journal 17 | *.sqlite-shm 18 | *.sqlite-wal 19 | bench/http-server/large_file.bin 20 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
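// NOTE: "program" below currently points at the workspace root; to debug the seeker
// binary it would usually be set to the built executable, e.g. (path assumed)
// "${workspaceFolder}/target/debug/seeker".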
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "lldb", 9 | "request": "launch", 10 | "name": "Debug", 11 | "program": "${workspaceFolder}/", 12 | "args": [], 13 | "cwd": "${workspaceFolder}" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "git.ignoreLimitWarning": true 3 | } -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0.0", 3 | "tasks": [ 4 | { 5 | "type": "cargo", 6 | "command": "clippy", 7 | "problemMatcher": [ 8 | "$rustc" 9 | ], 10 | "group": "build", 11 | "label": "rust: cargo clippy" 12 | }, 13 | { 14 | "type": "cargo", 15 | "command": "fix", 16 | "problemMatcher": [ 17 | "$rustc" 18 | ], 19 | "group": "build", 20 | "label": "rust: cargo fix" 21 | }, 22 | { 23 | "type": "cargo", 24 | "command": "test", 25 | "problemMatcher": [ 26 | "$rustc" 27 | ], 28 | "group": "build", 29 | "args": [ 30 | "--", 31 | "--nocapture" 32 | ], 33 | "label": "rust: cargo test" 34 | } 35 | ] 36 | } -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "seeker", 4 | "dnsserver", 5 | "ssclient", 6 | "sysconfig", 7 | "config", 8 | "crypto", 9 | "hermesdns", 10 | "socks5_client", 11 | "tun_nat", 12 | "http_proxy_client", 13 | "tcp_connection", 14 | "store", 15 | ] 16 | resolver = "2" 17 | 18 | [profile.release] 19 | lto = "thin" 20 | codegen-units = 1 21 | incremental = false 22 | strip = true 23 | debug = true 24 | 25 | # The profile that 'cargo dist' will build with 26 | [profile.dist] 27 | inherits = "release" 28 | lto = "thin" 29 | 30 | [workspace.metadata.workspaces] 31 | no_individual_tags = true 32 | independent = false 33 | 34 | # Config for 'cargo dist' 35 | [workspace.metadata.dist] 36 | # The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax) 37 | cargo-dist-version = "0.21.0" 38 | # CI backends to support 39 | ci = "github" 40 | # The installers to generate for each app 41 | installers = ["shell"] 42 | # Target platforms to build apps for (Rust target-triple syntax) 43 | targets = [ 44 | "aarch64-apple-darwin", 45 | "x86_64-apple-darwin", 46 | "x86_64-unknown-linux-gnu", 47 | "x86_64-unknown-linux-musl", 48 | ] 49 | # Path that installers should place binaries in 50 | install-path = "CARGO_HOME" 51 | # Whether to install an updater program 52 | install-updater = false 53 | 54 | [workspace.metadata.release] 55 | shared-version = true 56 | tag-name = "v{{version}}" 57 | publish = false 58 | 59 | [workspace.dependencies] 60 | anyhow = "1.0" 61 | async-std = { version = "1", features = ["default", "io_safety"] } 62 | async-std-resolver = "0.24" 63 | async-trait = "0.1" 64 | bitvec = "1.0" 65 | byte_string = "1.0" 66 | byteorder = "1.4" 67 | bytes = "1.2" 68 | cfg-if = "1.0" 69 | chrono = "0.4" 70 | clap = "4" 71 | ctrlc = "3.0" 72 | digest = "0.10" 73 | dyn-clone = "1.0" 74 | file-rotate = "0.7" 75 | futures-util = "0.3" 76 | hkdf = "0.12" 77 | libc = "0.2" 78 | libsodium-sys-stable = "1.19" 79 | maxminddb = "0.24" 80 | md-5 = "0.10" 81 | memchr = "2.5" 82 | nanorand = "0.7" 83 | nix = "0.29" 84 | 
once_cell = "1.16" 85 | openssl = "0.10" 86 | os_socketaddr = "0.2" 87 | parking_lot = "0.12.1" 88 | percent-encoding = "2.1" 89 | rand = "0.8" 90 | ring = "0.17" 91 | rusqlite = "0.32" 92 | serde = "1.0" 93 | serde_yaml = "0.9" 94 | sha-1 = "0.10" 95 | smoltcp = { version = "0.12", default-features = false } 96 | tempfile = "3.3" 97 | testcontainers = { version = "0.23", features = [ 98 | "blocking", 99 | ], default-features = false } 100 | tracing = "0.1" 101 | tracing-chrome = "0.7" 102 | tracing-futures = { version = "0.2", default-features = false } 103 | tracing-subscriber = "0.3" 104 | typenum = "1.15" 105 | ureq = "2.5" 106 | url = "2.3" 107 | async-tls = "0.13" 108 | base64 = "0.20" 109 | notify-debouncer-mini = "0.5" 110 | [patch.crates-io] 111 | tracing-chrome = { git = "https://github.com/gfreezy/tracing-chrome", rev = "2a3dbfe" } 112 | -------------------------------------------------------------------------------- /Cross.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | pre-build = "ci/ubuntu-install-packages" 3 | 4 | [build.env] 5 | passthrough = ["RUST_BACKTRACE", "RUST_LOG", "DNS"] 6 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright 2020 gfreezy 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /README.adoc: -------------------------------------------------------------------------------- 1 | = Seeker 2 | 3 | image::https://github.com/gfreezy/seeker/actions/workflows/release.yml/badge.svg[] 4 | image::https://github.com/gfreezy/seeker/actions/workflows/ci.yml/badge.svg?branch=master[] 5 | 6 | 7 | 使用 Tun 实现透明代理(支持 Mac & Linux 系统),支持 TCP、UDP。 8 | 9 | == Slack 10 | https://join.slack.com/t/allsunday/shared_invite/zt-f8xw3uzl-qchMa2jQOfQF1T89w3lfiw 11 | 12 | == Download 13 | 访问 https://github.com/gfreezy/seeker/releases 下载最新 release 14 | 15 | [source,bash] 16 | ---- 17 | chmod +x seeker-osx # or chmod+x seeker-linux 18 | ---- 19 | == Usage 20 | 21 | 1. 启动 `seeker` 22 | + 23 | [source,bash] 24 | ---- 25 | Seeker 0.5.0 26 | gfreezy 27 | Tun to Shadowsockets proxy. 
https://github.com/gfreezy/seeker 28 | 29 | USAGE: 30 | seeker [FLAGS] [OPTIONS] 31 | 32 | FLAGS: 33 | --encrypt Encrypt config file and output to terminal 34 | -h, --help Prints help information 35 | -V, --version Prints version information 36 | 37 | OPTIONS: 38 | -c, --config Sets config file. Sample config at 39 | https://github.com/gfreezy/seeker/blob/master/sample_config.yml 40 | --config-url URL to config 41 | --key Key for encryption/decryption 42 | -l, --log Log file 43 | -u, --uid User id to proxy 44 | ---- 45 | + 46 | 本地配置文件启动 47 | + 48 | [source,bash] 49 | ---- 50 | sudo seeker --config path/to/config.yml 51 | ---- 52 | + 53 | 远程配置文件启动 54 | + 55 | [source,bash] 56 | ---- 57 | sudo seeker --config-url https://pastebin.com/raw/config --key encrypt-key 58 | ---- 59 | + 60 | 生成远程配置文件 61 | + 62 | [source,bash] 63 | ---- 64 | sudo seeker --config path/to/config.yml --encrypt --key encrypt-key 65 | ---- 66 | 67 | 2. `seeker` 启动的时候会自动将本机 DNS 修改为 `127.0.0.1`,退出的时候将 DNS 设置为默认值 68 | 69 | == Config 70 | 71 | * `seeker` 直接使用的 clash 的规则。目前支持 `DOMAIN` `DOMAIN-KEYWORD` `DOMAIN-SUFFIX` `MATCH` 规则,不支持 `IP` 相关的规则。 72 | * 支持的 `Action`: 73 | * `PROXY(proxy-group-name)` 走 proxy-group-name 代理组 74 | * `DIRECT` 直连 75 | * `REJECT` 拒绝 76 | * `PROBE(proxy-group-name)` 默认尝试直连,如果超时,则走 proxy-group-name 代理组。 77 | * 确保系统没有重复的 `tun_name` 78 | * 确保 TUN 的网络 `tun_ip` 和 `tun_cidr` 与当前所处网络环境不在一个网段 79 | * `seeker` 支持 socks5 代理、http 代理和 shadowsocks 代理。优先级为 socks5 代理 > shadowsocks 代理 > http 代理。 80 | * `redir` 模式下使用 iptables 的 redirect 功能,只支持 tcp 流量。 81 | 82 | 83 | === Sample Config 84 | https://github.com/gfreezy/seeker/blob/master/sample_config.yml 85 | 86 | 里面有详细的配置文件说明。 87 | 88 | === 支持的 method 89 | ``` 90 | Table 91 | Plain 92 | 93 | Aes128Cfb 94 | Aes128Cfb1 95 | Aes128Cfb8 96 | Aes128Cfb128 97 | Aes192Cfb 98 | Aes192Cfb1 99 | Aes192Cfb8 100 | Aes192Cfb128 101 | Aes256Cfb 102 | Aes256Cfb1 103 | Aes256Cfb8 104 | Aes256Cfb128 105 | Aes128Ctr 106 | Aes192Ctr 107 | Aes256Ctr 108 | 109 | Camellia128Cfb 110 | Camellia192Cfb 111 | Camellia256Cfb 112 | Camellia128Cfb1 113 | Camellia192Cfb1 114 | Camellia256Cfb1 115 | Camellia128Cfb8 116 | Camellia192Cfb8 117 | Camellia256Cfb8 118 | Camellia128Cfb128 119 | Camellia192Cfb128 120 | Camellia256Cfb128 121 | 122 | Rc4 123 | Rc4Md5 124 | 125 | ChaCha20 126 | Salsa20 127 | XSalsa20 128 | ChaCha20Ietf 129 | 130 | Aes128Gcm 131 | Aes256Gcm 132 | 133 | ChaCha20IetfPoly1305 134 | XChaCha20IetfPoly1305 135 | 136 | Aes128PmacSiv 137 | Aes256PmacSiv 138 | ``` 139 | == ⚠️使用 Socks5 或 http 代理服务器 140 | 使用 socks5 代理的时候,需要将所有直连的域名设置在配置文件里面,如果使用 ss 或者 vmess 之类的,需要将 ss 或 vmess server 141 | 的域名也加入配置文件。否则有可能会导致死循环,没法正常使用。 142 | 143 | ⚠️ http 代理只支持 `CONNECT` 协议,而且不支持 UDP 协议。 144 | 145 | == 指定 IP 或某网段走代理 146 | 在配置文件中增加 `IP-CIDR` 规则即可。默认情况下 IP 都是是直连,所以只需要添加 `PROXY` 和 `PROBE`。如下: 147 | 148 | [source,yaml] 149 | ---- 150 | rules: 151 | - 'IP-CIDR,19.23.212.0/16,PROXY' 152 | - 'IP-CIDR,19.23.21.0/16,PROBE' 153 | ---- 154 | 155 | == 代理局域网内其他机器 156 | 1. 打开 `gateway_mode`。`gateway_mode` 开启后, `dns_server` 会自动覆盖为 `0.0.0.0:53` 157 | + 158 | [source,yaml] 159 | ---- 160 | gateway_mode: true 161 | ---- 162 | 163 | 2. 查看本地 IP 164 | + 165 | [source,shell script] 166 | ---- 167 | ifconfig 168 | ---- 169 | 170 | 3. 打开希望走代理的手机或者电脑的网络设置,将 **DNS** 与 **网关** 修改为步骤2获取到的 IP 171 | 172 | 173 | == 重置 DNS 分配 174 | 175 | [source,bash] 176 | ---- 177 | rm -rf seeker.sqlite 178 | ---- 179 | 180 | 181 | == FAQ 182 | . 
If you encountered `"seeker" cannot be opened because the developer cannot be verified.`, 183 | you can go to `System Preferences` -> `Security & Privacy` -> `General` and enable any 184 | blocked app from Allow apps downloaded from pane at the bottom of the window. 185 | 186 | . Ubuntu 提示 `Address already used`, 查看这里 https://unix.stackexchange.com/questions/304050/how-to-avoid-conflicts-between-dnsmasq-and-systemd-resolved 187 | 188 | == Build (latest stable) 189 | 190 | [source,bash] 191 | ---- 192 | git clone https://github.com/gfreezy/seeker.git 193 | cd seeker 194 | OPENSSL_STATIC=yes cargo build --release 195 | ---- 196 | 197 | 编译完成后,程序在 `target/release/seeker`。 198 | 199 | === musl 编译 200 | 201 | [source,shell] 202 | ---- 203 | docker run -v $PWD:/volume -e OPENSSL_STATIC=yes --rm -t clux/muslrust cargo build --release 204 | ---- 205 | 206 | 会在 `target/x86_64-unknown-linux-musl/release` 目录下生成 `seeker` 文件。 207 | 208 | 209 | == 实现原理 210 | `seeker` 参考了 `Surge for Mac` 的实现原理,基本如下: 211 | 212 | . `seeker` 会在本地启动一个 DNS server,并自动将本机 DNS 修改为 `seeker` 的 DNS 服务器地址 213 | . `seeker` 会创建一个 TUN 设备,并将 IP 设置为 `10.0.0.1`,系统路由表设置 `10.0.0.0/16` 网段都路由到 TUN 设备 214 | . 有应用请求 DNS 的时候, `seeker` 会为这个域名返回 `10.0.0.0/16` 网段内一个唯一的 IP 215 | . `seeker` 从 TUN 接受到 IP 包后,会在内部组装成 TCP/UDP 数据 216 | . `seeker` 会根据规则和网络连接的 uid 判断走代理还是直连 217 | . 如果需要走代理,将 TCP/UDP 数据转发到 SS 服务器/ socks5 代理,从代理接受到数据后,在返回给应用;如果直连,则本地建立直接将数据发送到目标地址 218 | 219 | 220 | == 如何发布新版本 221 | ``` 222 | $ cargo install cargo-release 223 | $ ./add-tag.sh 224 | ``` 225 | 226 | Github Action 会自动编译并发布新的 release。 227 | 228 | == License 229 | 230 | Licensed under either of 231 | 232 | * Apache License, Version 2.0 233 | ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 234 | * MIT license 235 | ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 236 | 237 | at your option. 238 | 239 | == Contribution 240 | 241 | Unless you explicitly state otherwise, any contribution intentionally submitted 242 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 243 | dual licensed as above, without any additional terms or conditions. 244 | -------------------------------------------------------------------------------- /add-tag.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 检查当前的分支是否是 master 分支 3 | current_branch=$(git symbolic-ref --short -q HEAD) 4 | if [ "$current_branch" != "master" ]; then 5 | echo "Current branch is $current_branch, not master. Please checkout master branch before tagging." 6 | exit 1 7 | fi 8 | 9 | # 检查工作区是否干净 10 | if [ -n "$(git status --porcelain)" ]; then 11 | echo "Working directory is not clean. Please commit or stash your changes before tagging." 12 | exit 1 13 | fi 14 | 15 | # 获取今天的日期,格式为 YYYYMMDD 16 | major=$(date +"%Y%m%d") 17 | minor=0 18 | patch=0 19 | 20 | # 查找是否存在类似的版本号 21 | existing_tags=$(git tag | grep "${major}\.${minor}\.[0-9]\+") 22 | 23 | echo "Existing tags: $existing_tags" 24 | 25 | if [ -z "$existing_tags" ]; then 26 | # 如果没有类似的版本号,使用 0 作为 patch 27 | new_version="${major}.${minor}.${patch}" 28 | else 29 | # 如果有类似的版本号,找到 patch 最大的版本号 30 | max_patch=$(echo "$existing_tags" | awk -F'.' 
'{print $3}' | sort -nr | head -n 1) 31 | new_patch=$((max_patch + 1)) 32 | new_version="${major}.${minor}.${new_patch}" 33 | fi 34 | 35 | echo "New version: $new_version" 36 | 37 | cargo release --execute $new_version 38 | -------------------------------------------------------------------------------- /bench/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | large_file_server: 3 | build: http-server 4 | ports: 5 | - "8000:8000" 6 | volumes: 7 | - ./http-server:/app 8 | environment: 9 | - FLASK_ENV=development 10 | command: python main.py 11 | 12 | shadowsocks: 13 | image: shadowsocks/shadowsocks-libev 14 | container_name: shadowsocks-server 15 | ports: 16 | - "8388:8388" 17 | - "8388:8388/udp" 18 | environment: 19 | - PASSWORD=9MLSpPmNt # 设置你的密码 20 | - METHOD=aes-256-gcm # 加密方式,例:aes-256-gcm 21 | - DNS_ADDRS=114.114.114.114 # 可选,多个 DNS 地址用逗号分隔 22 | - TIMEOUT=10 23 | volumes: 24 | - ./shadowsocks/resolv.conf:/etc/resolv.conf:ro 25 | depends_on: 26 | - large_file_server 27 | -------------------------------------------------------------------------------- /bench/http-server/Dockerfile: -------------------------------------------------------------------------------- 1 | # 使用官方的 Python 基础镜像 2 | FROM python:3.9-slim 3 | 4 | # 设置工作目录 5 | WORKDIR /app 6 | 7 | # 安装 Flask 8 | RUN pip install flask 9 | 10 | # 将当前目录的内容复制到容器中的 /app 目录 11 | COPY . . 12 | 13 | # 暴露端口 8000 14 | EXPOSE 8000 15 | 16 | # 启动 Flask 应用 17 | CMD ["python", "main.py"] -------------------------------------------------------------------------------- /bench/http-server/create-large-file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 生成一个 10GB 的文件 3 | dd if=/dev/zero of=large_file.bin bs=1M count=10240 -------------------------------------------------------------------------------- /bench/http-server/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from flask import Flask, send_file 3 | 4 | app = Flask(__name__) 5 | 6 | @app.route('/download') 7 | def download_large_file(): 8 | # 确保文件路径正确 9 | file_path = "large_file.bin" 10 | if os.path.exists(file_path): 11 | return send_file(file_path, as_attachment=True) 12 | else: 13 | return "File not found", 404 14 | 15 | if __name__ == '__main__': 16 | app.run(host='0.0.0.0', port=8000) -------------------------------------------------------------------------------- /bench/seeker/config.yml: -------------------------------------------------------------------------------- 1 | verbose: false 2 | dns_start_ip: 11.0.0.10 3 | dns_servers: # dns 服务器列表,如果不设置,会自动从系统获取。最好指定,否则 Wi-Fi 切换时可能会出现问题。 4 | - 223.5.5.5:53 5 | - 114.114.114.114:53 6 | - tcp://114.114.114.114:53 7 | dns_timeout: 1s 8 | # redir 模式使用 iptable 的 redirect 功能: iptables -t nat -A PREROUTING -d 11.0.0.0/16 -p tcp -j REDIRECT --to-ports 1300 9 | # redir 模式下只支持 tcp 流量。默认使用 tun 模式。特殊设备不支持 tun 的情况,可以使用 redir 模式。 10 | redir_mode: false 11 | db_path: bench-seeker2.sqlite 12 | queue_number: 2 13 | threads_per_queue: 3 14 | tun_bypass_direct: false # 直连的域名直接返回真实IP,不走tun 15 | tun_name: utun10 16 | tun_ip: 11.0.0.1 17 | tun_cidr: 11.0.0.0/16 18 | dns_listens: 19 | - 0.0.0.0:53 # 如果只是本机使用(不支持 docker) 20 | gateway_mode: true # 是否支持局域网共享 21 | probe_timeout: 200ms 22 | ping_timeout: 2s 23 | connect_timeout: 2s 24 | read_timeout: 300s 25 | write_timeout: 300s 26 | max_connect_errors: 2 27 | geo_ip: path/to/geoip.mmdb # geoip 
数据库路径,如果使用相对路径,相对于可执行文件的路径。默认会搜索可执行文件同级目录下的 geoip.mmdb 文件 28 | ping_urls: 29 | - host: baidu.com 30 | port: 80 31 | path: / 32 | servers: 33 | - name: server1 34 | addr: 192.168.97.3:8388 # 替换成 ss 服务器的地址 35 | method: aes-256-gcm 36 | password: 9MLSpPmNt 37 | protocol: Shadowsocks 38 | rules: 39 | # - 'DOMAIN-KEYWORD,allsunday.io,PROXY' 40 | - 'MATCH,DIRECT' 41 | -------------------------------------------------------------------------------- /bench/shadowsocks/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 114.114.114.114 -------------------------------------------------------------------------------- /bench/ss-server/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 114.114.114.114 -------------------------------------------------------------------------------- /ci/cargo-out-dir: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Finds Cargo's `OUT_DIR` directory from the most recent build. 4 | # 5 | # This requires one parameter corresponding to the target directory 6 | # to search for the build output. 7 | 8 | if [ $# != 1 ]; then 9 | echo "Usage: $(basename "$0") " >&2 10 | exit 2 11 | fi 12 | 13 | # This works by finding the most recent stamp file, which is produced by 14 | # every ripgrep build. 15 | target_dir="$1" 16 | find "$target_dir" -name ripgrep-stamp -print0 \ 17 | | xargs -0 ls -t \ 18 | | head -n1 \ 19 | | xargs dirname 20 | -------------------------------------------------------------------------------- /ci/macos-install-packages: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | -------------------------------------------------------------------------------- /ci/ubuntu-install-packages: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | apt-get update 3 | apt-get install -y --no-install-recommends \ 4 | libssl-dev xz-utils liblz4-tool build-essential clang 5 | -------------------------------------------------------------------------------- /config/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "config" 3 | version = "20250331.0.0" 4 | authors = ["gfreezy "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | serde = { workspace = true, features = ["derive", "rc"] } 11 | url = { workspace = true, features = ["serde"] } 12 | serde_yaml = { workspace = true } 13 | bytes = { workspace = true } 14 | crypto = { path = "../crypto" } 15 | socks5_client = { path = "../socks5_client" } 16 | tcp_connection = { path = "../tcp_connection" } 17 | smoltcp = { workspace = true, features = ["proto-ipv6", "proto-ipv4", "std"] } 18 | base64 = { workspace = true } 19 | percent-encoding = { workspace = true } 20 | tracing = { workspace = true } 21 | ureq = { workspace = true } 22 | maxminddb = { workspace = true } 23 | parking_lot = { workspace = true } 24 | store = { path = "../store" } 25 | 26 | [dev-dependencies] 27 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 28 | -------------------------------------------------------------------------------- /crypto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "crypto" 3 | version = "20250331.0.0" 4 | authors = 
["gfreezy "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | bytes = { workspace = true } 11 | rand = { workspace = true } 12 | md-5 = { workspace = true } 13 | digest = { workspace = true } 14 | typenum = { workspace = true } 15 | byte_string = { workspace = true } 16 | byteorder = { workspace = true } 17 | openssl = { workspace = true, features = ["vendored"], optional = true } 18 | libc = { workspace = true } 19 | hkdf = { workspace = true } 20 | sha-1 = { workspace = true } 21 | libsodium-sys-stable = { workspace = true, optional = true } 22 | ring = { workspace = true, optional = true } 23 | 24 | [features] 25 | default = ["sodium", "aes-cfb", "aes-ctr", "camellia-cfb", "use-ring"] 26 | sodium = ["libsodium-sys-stable"] 27 | aes-cfb = ["openssl"] 28 | aes-ctr = ["openssl"] 29 | camellia-cfb = ["openssl"] 30 | use-ring = ["ring"] 31 | rc4 = [] 32 | miscreant = [] 33 | -------------------------------------------------------------------------------- /crypto/src/aead.rs: -------------------------------------------------------------------------------- 1 | //! Aead Ciphers 2 | 3 | use crate::cipher::{CipherCategory, CipherResult, CipherType}; 4 | 5 | #[cfg(feature = "use-ring")] 6 | use crate::ring::RingAeadCipher; 7 | #[cfg(feature = "miscreant")] 8 | use crate::siv::MiscreantCipher; 9 | #[cfg(feature = "sodium")] 10 | use crate::sodium::SodiumAeadCipher; 11 | 12 | use bytes::{Bytes, BytesMut}; 13 | use hkdf::Hkdf; 14 | use sha1::Sha1; 15 | 16 | /// Encryptor API for AEAD ciphers 17 | pub trait AeadEncryptor { 18 | /// Encrypt `input` to `output` with `tag`. `output.len()` should equals to `input.len() + tag.len()`. 19 | /// ```plain 20 | /// +----------------------------------------+-----------------------+ 21 | /// | ENCRYPTED TEXT (length = input.len()) | TAG | 22 | /// +----------------------------------------+-----------------------+ 23 | /// ``` 24 | fn encrypt(&mut self, input: &[u8], output: &mut [u8]); 25 | } 26 | 27 | /// Decryptor API for AEAD ciphers 28 | pub trait AeadDecryptor { 29 | /// Decrypt `input` to `output` with `tag`. `output.len()` should equals to `input.len() - tag.len()`. 
30 | /// ```plain 31 | /// +----------------------------------------+-----------------------+ 32 | /// | ENCRYPTED TEXT (length = output.len()) | TAG | 33 | /// +----------------------------------------+-----------------------+ 34 | /// ``` 35 | fn decrypt(&mut self, input: &[u8], output: &mut [u8]) -> CipherResult<()>; 36 | } 37 | 38 | /// Variant `AeadDecryptor` 39 | pub type BoxAeadDecryptor = Box; 40 | 41 | /// Variant `AeadEncryptor` 42 | pub type BoxAeadEncryptor = Box; 43 | 44 | /// Generate a specific AEAD cipher encryptor 45 | pub fn new_aead_encryptor(t: CipherType, key: &[u8], nonce: &[u8]) -> BoxAeadEncryptor { 46 | assert!(t.category() == CipherCategory::Aead); 47 | 48 | match t { 49 | #[cfg(feature = "use-ring")] 50 | CipherType::Aes128Gcm | CipherType::Aes256Gcm | CipherType::ChaCha20IetfPoly1305 => { 51 | Box::new(RingAeadCipher::new(t, key, nonce, true)) 52 | } 53 | 54 | #[cfg(feature = "sodium")] 55 | CipherType::XChaCha20IetfPoly1305 => Box::new(SodiumAeadCipher::new(t, key, nonce)), 56 | 57 | #[cfg(feature = "miscreant")] 58 | CipherType::Aes128PmacSiv | CipherType::Aes256PmacSiv => { 59 | Box::new(MiscreantCipher::new(t, key, nonce)) 60 | } 61 | 62 | _ => unreachable!(), 63 | } 64 | } 65 | 66 | /// Generate a specific AEAD cipher decryptor 67 | pub fn new_aead_decryptor(t: CipherType, key: &[u8], nonce: &[u8]) -> BoxAeadDecryptor { 68 | assert!(t.category() == CipherCategory::Aead); 69 | 70 | match t { 71 | #[cfg(feature = "use-ring")] 72 | CipherType::Aes128Gcm | CipherType::Aes256Gcm | CipherType::ChaCha20IetfPoly1305 => { 73 | Box::new(RingAeadCipher::new(t, key, nonce, false)) 74 | } 75 | 76 | #[cfg(feature = "sodium")] 77 | CipherType::XChaCha20IetfPoly1305 => Box::new(SodiumAeadCipher::new(t, key, nonce)), 78 | 79 | #[cfg(feature = "miscreant")] 80 | CipherType::Aes128PmacSiv | CipherType::Aes256PmacSiv => { 81 | Box::new(MiscreantCipher::new(t, key, nonce)) 82 | } 83 | 84 | _ => unreachable!(), 85 | } 86 | } 87 | 88 | const SUBKEY_INFO: &[u8] = b"ss-subkey"; 89 | 90 | /// Make Session key 91 | /// 92 | /// ## Session key (SIP007) 93 | /// 94 | /// AEAD ciphers require a per-session subkey derived from the pre-shared master key using HKDF, and use the subkey 95 | /// to encrypt/decrypt. Essentially it means we are moving from (M+N)-bit (PSK, nonce) pair to 96 | /// (M+N)-bit (HKDF(PSK, salt), nonce) pair. Because HKDF is a PRF, the new construction significantly expands the 97 | /// amount of randomness (from N to at least M where M is much greater than N), thus correcting the previously 98 | /// mentioned design flaw. 99 | /// 100 | /// Assuming we already have a user-supplied pre-shared master key PSK. 101 | /// 102 | /// Function HKDF_SHA1 is a HKDF constructed using SHA1 hash. Its signature is 103 | /// 104 | /// ```plain 105 | /// HKDF_SHA1(secret_key, salt, info) 106 | /// ``` 107 | /// 108 | /// The "info" string argument allows us to bind the derived subkey to a specific application context. 109 | /// 110 | /// For AEAD ciphers, the encryption scheme is: 111 | /// 112 | /// 1. Pick a random R-bit salt (R = max(128, len(SK))) 113 | /// 2. Derive subkey SK = HKDF_SHA1(PSK, salt, "ss-subkey") 114 | /// 3. Send salt 115 | /// 4. For each chunk, encrypt and authenticate payload using SK with a counting nonce 116 | /// (starting from 0 and increment by 1 after each use) 117 | /// 5. 
Send encrypted chunk 118 | pub fn make_skey(t: CipherType, key: &[u8], salt: &[u8]) -> Bytes { 119 | assert!(t.category() == CipherCategory::Aead); 120 | 121 | let hkdf = Hkdf::::new(Some(salt), key); 122 | 123 | let mut skey = BytesMut::with_capacity(key.len()); 124 | unsafe { 125 | skey.set_len(key.len()); 126 | } 127 | 128 | hkdf.expand(SUBKEY_INFO, &mut skey).unwrap(); 129 | 130 | skey.freeze() 131 | } 132 | 133 | /// Increase nonce by 1 134 | /// 135 | /// AEAD ciphers requires to increase nonce after encrypt/decrypt every chunk 136 | #[cfg(feature = "sodium")] 137 | pub fn increase_nonce(nonce: &mut [u8]) { 138 | use libsodium_sys::sodium_increment; 139 | 140 | unsafe { 141 | sodium_increment(nonce.as_mut_ptr(), nonce.len()); 142 | } 143 | } 144 | 145 | /// Increase nonce by 1 146 | /// 147 | /// AEAD ciphers requires to increase nonce after encrypt/decrypt every chunk 148 | #[cfg(not(feature = "sodium"))] 149 | pub fn increase_nonce(nonce: &mut [u8]) { 150 | for i in nonce { 151 | if std::u8::MAX == *i { 152 | *i = 0; 153 | } else { 154 | *i += 1; 155 | return; 156 | } 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /crypto/src/digest.rs: -------------------------------------------------------------------------------- 1 | //! Message digest algorithm 2 | 3 | use digest::OutputSizeUser; 4 | use md5::Md5; 5 | use sha1::Sha1; 6 | 7 | use bytes::BufMut; 8 | 9 | /// Digest trait 10 | pub trait Digest: Send { 11 | /// Update data 12 | fn update(&mut self, data: &[u8]); 13 | 14 | /// Generates digest 15 | fn digest_reset(&mut self, buf: &mut B); 16 | 17 | /// Length of digest 18 | fn digest_len(&self) -> usize; 19 | } 20 | 21 | /// Type of defined digests 22 | #[derive(Clone, Copy)] 23 | pub enum DigestType { 24 | Md5, 25 | Sha1, 26 | Sha, 27 | } 28 | 29 | /// Create digest with type 30 | pub fn with_type(t: DigestType) -> DigestVariant { 31 | match t { 32 | DigestType::Md5 => DigestVariant::Md5(Md5::default()), 33 | DigestType::Sha1 | DigestType::Sha => DigestVariant::Sha1(Sha1::default()), 34 | } 35 | } 36 | 37 | /// Variant of supported digest 38 | pub enum DigestVariant { 39 | Md5(Md5), 40 | Sha1(Sha1), 41 | } 42 | 43 | impl Digest for DigestVariant { 44 | fn update(&mut self, data: &[u8]) { 45 | use md5::Digest; 46 | 47 | match *self { 48 | DigestVariant::Md5(ref mut d) => d.update(data), 49 | DigestVariant::Sha1(ref mut d) => d.update(data), 50 | } 51 | } 52 | 53 | fn digest_reset(&mut self, buf: &mut B) { 54 | use digest::Digest; 55 | match self { 56 | DigestVariant::Md5(d) => buf.put(&*d.finalize_reset()), 57 | DigestVariant::Sha1(d) => buf.put(&*d.finalize_reset()), 58 | } 59 | } 60 | 61 | fn digest_len(&self) -> usize { 62 | match *self { 63 | DigestVariant::Md5(_) => ::output_size(), 64 | DigestVariant::Sha1(_) => ::output_size(), 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /crypto/src/dummy.rs: -------------------------------------------------------------------------------- 1 | //! 
Dummy cipher, encrypt and decrypt nothing 2 | 3 | use super::{CipherResult, StreamCipher}; 4 | 5 | use bytes::BufMut; 6 | 7 | /// Dummy cipher 8 | /// 9 | /// Copies data directly to output, very dummy 10 | pub struct DummyCipher; 11 | 12 | impl StreamCipher for DummyCipher { 13 | fn update(&mut self, data: &[u8], out: &mut dyn BufMut) -> CipherResult<()> { 14 | out.put_slice(data); 15 | Ok(()) 16 | } 17 | 18 | fn finalize(&mut self, _: &mut dyn BufMut) -> CipherResult<()> { 19 | Ok(()) 20 | } 21 | 22 | fn buffer_size(&self, data: &[u8]) -> usize { 23 | data.len() 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /crypto/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Crypto methods for shadowsocks 2 | 3 | pub use self::{ 4 | aead::{ 5 | new_aead_decryptor, new_aead_encryptor, AeadDecryptor, AeadEncryptor, BoxAeadDecryptor, 6 | BoxAeadEncryptor, 7 | }, 8 | cipher::{CipherCategory, CipherResult, CipherType}, 9 | stream::{new_stream, BoxStreamCipher, StreamCipher}, 10 | }; 11 | #[cfg(feature = "openssl")] 12 | use ::openssl::symm; 13 | 14 | pub mod aead; 15 | pub mod cipher; 16 | pub mod digest; 17 | pub mod dummy; 18 | #[cfg(feature = "openssl")] 19 | pub mod openssl; 20 | #[cfg(feature = "use-ring")] 21 | pub mod ring; 22 | #[cfg(feature = "miscreant")] 23 | pub mod siv; 24 | #[cfg(feature = "sodium")] 25 | pub mod sodium; 26 | pub mod stream; 27 | pub mod table; 28 | 29 | /// Crypto mode, encrypt or decrypt 30 | #[derive(Clone, Copy, Eq, PartialEq, Debug)] 31 | pub enum CryptoMode { 32 | Encrypt, 33 | Decrypt, 34 | } 35 | 36 | #[cfg(feature = "openssl")] 37 | impl std::convert::From for symm::Mode { 38 | fn from(m: CryptoMode) -> symm::Mode { 39 | match m { 40 | CryptoMode::Encrypt => symm::Mode::Encrypt, 41 | CryptoMode::Decrypt => symm::Mode::Decrypt, 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /crypto/src/ring.rs: -------------------------------------------------------------------------------- 1 | //! Cipher defined with Ring 2 | 3 | use ring::{ 4 | aead::{ 5 | Aad, Algorithm, Nonce, NonceSequence, OpeningKey, SealingKey, UnboundKey, AES_128_GCM, 6 | AES_256_GCM, CHACHA20_POLY1305, NONCE_LEN, 7 | }, 8 | error::Unspecified, 9 | }; 10 | 11 | use crate::{ 12 | aead::{increase_nonce, make_skey}, 13 | cipher::Error, 14 | AeadDecryptor, AeadEncryptor, CipherResult, CipherType, 15 | }; 16 | 17 | use byte_string::ByteStr; 18 | use bytes::{BufMut, BytesMut}; 19 | 20 | /// AEAD ciphers provided by Ring 21 | pub enum RingAeadCryptoVariant { 22 | Seal(SealingKey), 23 | Open(OpeningKey), 24 | } 25 | 26 | pub struct RingAeadNonceSequence { 27 | nonce: [u8; NONCE_LEN], 28 | } 29 | 30 | impl RingAeadNonceSequence { 31 | fn new() -> RingAeadNonceSequence { 32 | RingAeadNonceSequence { 33 | nonce: [0u8; NONCE_LEN], 34 | } 35 | } 36 | } 37 | 38 | impl NonceSequence for RingAeadNonceSequence { 39 | fn advance(&mut self) -> Result { 40 | let nonce = Nonce::assume_unique_for_key(self.nonce); 41 | increase_nonce(&mut self.nonce); 42 | Ok(nonce) 43 | } 44 | } 45 | 46 | /// AEAD Cipher context 47 | /// 48 | /// According to SIP004, the `nonce` has to incr 1 after each encrypt/decrypt. 
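/// The nonce starts at all zeroes and `RingAeadNonceSequence::advance` bumps it
/// (little-endian) after each sealed or opened chunk, so encryptor and decryptor stay
/// in sync as long as every chunk is processed exactly once and in order.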
49 | pub struct RingAeadCipher { 50 | cipher: RingAeadCryptoVariant, 51 | cipher_type: CipherType, 52 | } 53 | 54 | impl RingAeadCipher { 55 | /// Initialize context 56 | pub fn new(t: CipherType, key: &[u8], salt: &[u8], is_seal: bool) -> RingAeadCipher { 57 | // TODO: Check if salt is duplicated 58 | 59 | // Nonce is 12 bytes 60 | assert_eq!(t.iv_size(), NONCE_LEN); 61 | 62 | let skey = make_skey(t, key, salt); 63 | let cipher = RingAeadCipher::new_variant(t, &skey, is_seal); 64 | RingAeadCipher { 65 | cipher, 66 | cipher_type: t, 67 | } 68 | } 69 | 70 | fn new_variant(t: CipherType, key: &[u8], is_seal: bool) -> RingAeadCryptoVariant { 71 | match t { 72 | CipherType::Aes128Gcm => RingAeadCipher::new_crypt(&AES_128_GCM, key, is_seal), 73 | CipherType::Aes256Gcm => RingAeadCipher::new_crypt(&AES_256_GCM, key, is_seal), 74 | CipherType::ChaCha20IetfPoly1305 => { 75 | RingAeadCipher::new_crypt(&CHACHA20_POLY1305, key, is_seal) 76 | } 77 | _ => panic!("unsupported cipher in ring {t:?}"), 78 | } 79 | } 80 | 81 | #[inline] 82 | fn new_crypt( 83 | algorithm: &'static Algorithm, 84 | key: &[u8], 85 | is_seal: bool, 86 | ) -> RingAeadCryptoVariant { 87 | use ring::aead::BoundKey; 88 | 89 | let unbound_key = UnboundKey::new(algorithm, key).unwrap(); 90 | 91 | if is_seal { 92 | RingAeadCryptoVariant::Seal(SealingKey::new(unbound_key, RingAeadNonceSequence::new())) 93 | } else { 94 | RingAeadCryptoVariant::Open(OpeningKey::new(unbound_key, RingAeadNonceSequence::new())) 95 | } 96 | } 97 | } 98 | 99 | impl AeadEncryptor for RingAeadCipher { 100 | fn encrypt(&mut self, input: &[u8], output: &mut [u8]) { 101 | let tag_len = self.cipher_type.tag_size(); 102 | let buf_len = input.len() + tag_len; 103 | assert_eq!(output.len(), buf_len); 104 | 105 | let mut buf = BytesMut::with_capacity(output.len()); 106 | buf.put_slice(input); 107 | 108 | if let RingAeadCryptoVariant::Seal(ref mut key) = self.cipher { 109 | key.seal_in_place_append_tag(Aad::empty(), &mut buf) 110 | .unwrap(); 111 | } else { 112 | unreachable!("encrypt is called on a non-seal cipher"); 113 | } 114 | 115 | output.copy_from_slice(&buf[..buf_len]); 116 | } 117 | } 118 | 119 | impl AeadDecryptor for RingAeadCipher { 120 | fn decrypt(&mut self, input: &[u8], output: &mut [u8]) -> CipherResult<()> { 121 | let tag_len = self.cipher_type.tag_size(); 122 | assert_eq!(output.len() + tag_len, input.len()); 123 | 124 | let mut buf = BytesMut::with_capacity(input.len()); 125 | buf.put_slice(input); 126 | 127 | if let RingAeadCryptoVariant::Open(ref mut key) = self.cipher { 128 | match key.open_in_place(Aad::empty(), &mut buf) { 129 | Ok(obuf) => { 130 | output.copy_from_slice(obuf); 131 | Ok(()) 132 | } 133 | Err(..) 
=> { 134 | eprintln!( 135 | "AEAD decrypt failed, input={:?}, tag={:?}, opening: {:?}", 136 | ByteStr::new(&input[..output.len()]), 137 | ByteStr::new(&input[output.len()..]), 138 | key, 139 | ); 140 | Err(Error::AeadDecryptFailed) 141 | } 142 | } 143 | } else { 144 | unreachable!("decrypt is called on a non-open cipher"); 145 | } 146 | } 147 | } 148 | 149 | #[cfg(test)] 150 | mod test { 151 | use super::*; 152 | use crate::CipherType; 153 | 154 | fn test_ring_aead(ct: CipherType) { 155 | let key = ct.bytes_to_key(b"PassWORD"); 156 | let message = b"message"; 157 | 158 | let iv = ct.gen_init_vec(); 159 | 160 | let mut enc = RingAeadCipher::new(ct, &key[..], &iv[..], true); 161 | 162 | let mut encrypted_msg = vec![0u8; message.len() + ct.tag_size()]; 163 | enc.encrypt(message, &mut encrypted_msg); 164 | 165 | assert_ne!(message, &encrypted_msg[..]); 166 | 167 | let mut dec = RingAeadCipher::new(ct, &key[..], &iv[..], false); 168 | let mut decrypted_msg = vec![0u8; message.len()]; 169 | dec.decrypt(&encrypted_msg[..], &mut decrypted_msg).unwrap(); 170 | 171 | assert_eq!(&decrypted_msg[..], message); 172 | } 173 | 174 | #[test] 175 | fn test_ring_aes128gcm() { 176 | test_ring_aead(CipherType::Aes128Gcm); 177 | } 178 | 179 | #[test] 180 | fn test_ring_aes256gcm() { 181 | test_ring_aead(CipherType::Aes256Gcm); 182 | } 183 | 184 | #[test] 185 | fn test_ring_chacha20poly1305() { 186 | test_ring_aead(CipherType::ChaCha20IetfPoly1305); 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /crypto/src/siv.rs: -------------------------------------------------------------------------------- 1 | //! Cipher defined with Miscreant 2 | 3 | use std::ptr; 4 | 5 | use miscreant::{Aead, Aes128PmacSivAead, Aes256PmacSivAead}; 6 | 7 | use crate::{ 8 | aead::{increase_nonce, make_skey}, 9 | cipher::Error, 10 | AeadDecryptor, AeadEncryptor, CipherResult, CipherType, 11 | }; 12 | 13 | use byte_string::ByteStr; 14 | use bytes::{BufMut, BytesMut}; 15 | use log::error; 16 | 17 | /// AEAD ciphers provided by Miscreant 18 | pub enum MiscreantCryptoVariant { 19 | Aes128(Aes128PmacSivAead), 20 | Aes256(Aes256PmacSivAead), 21 | } 22 | 23 | /// AEAD Cipher context 24 | /// 25 | /// According to SIP004, the `nonce` has to incr 1 after each encrypt/decrypt. 26 | pub struct MiscreantCipher { 27 | cipher_type: CipherType, 28 | cipher: MiscreantCryptoVariant, 29 | nonce: BytesMut, 30 | } 31 | 32 | impl MiscreantCipher { 33 | /// Initialize context 34 | pub fn new(t: CipherType, key: &[u8], salt: &[u8]) -> Self { 35 | // NOTE: Don't need check salt is duplicated. 
:) 36 | 37 | let nonce_size = t.iv_size(); 38 | let mut nonce = BytesMut::with_capacity(nonce_size); 39 | unsafe { 40 | nonce.set_len(nonce_size); 41 | ptr::write_bytes(nonce.as_mut_ptr(), 0, nonce_size); 42 | } 43 | 44 | let skey = make_skey(t, key, salt); 45 | let cipher = Self::new_variant(t, &skey); 46 | MiscreantCipher { 47 | cipher_type: t, 48 | cipher: cipher, 49 | nonce: nonce, 50 | } 51 | } 52 | 53 | fn new_variant(t: CipherType, key: &[u8]) -> MiscreantCryptoVariant { 54 | match t { 55 | CipherType::Aes128PmacSiv => { 56 | let mut skey = [0; 32]; 57 | skey.copy_from_slice(key); 58 | MiscreantCryptoVariant::Aes128(Aes128PmacSivAead::new(&skey)) 59 | } 60 | CipherType::Aes256PmacSiv => { 61 | let mut skey = [0; 64]; 62 | skey.copy_from_slice(key); 63 | MiscreantCryptoVariant::Aes256(Aes256PmacSivAead::new(&skey)) 64 | } 65 | _ => panic!("unsupported cipher in miscreant {:?}", t), 66 | } 67 | } 68 | } 69 | 70 | impl AeadEncryptor for MiscreantCipher { 71 | fn encrypt(&mut self, input: &[u8], output: &mut [u8]) { 72 | let tag_len = self.cipher_type.tag_size(); 73 | let buf_len = input.len() + tag_len; 74 | 75 | // Miscreant requires tag to be in front of text 76 | let mut buf = BytesMut::with_capacity(buf_len); 77 | unsafe { 78 | buf.set_len(buf_len); 79 | } 80 | buf[tag_len..].copy_from_slice(input); 81 | 82 | // NOTE: Must swap tag and encrypted text to output 83 | match self.cipher { 84 | MiscreantCryptoVariant::Aes128(ref mut cipher) => { 85 | cipher.encrypt_in_place(&self.nonce, b"", &mut buf); 86 | output[..input.len()].copy_from_slice(&buf[tag_len..]); 87 | output[input.len()..].copy_from_slice(&buf[..tag_len]); 88 | } 89 | MiscreantCryptoVariant::Aes256(ref mut cipher) => { 90 | cipher.encrypt_in_place(&self.nonce, b"", &mut buf); 91 | output[..input.len()].copy_from_slice(&buf[tag_len..]); 92 | output[input.len()..].copy_from_slice(&buf[..tag_len]); 93 | } 94 | } 95 | 96 | increase_nonce(&mut self.nonce); 97 | } 98 | } 99 | 100 | impl AeadDecryptor for MiscreantCipher { 101 | fn decrypt(&mut self, input: &[u8], output: &mut [u8]) -> CipherResult<()> { 102 | let tag_size = self.cipher_type.tag_size(); 103 | 104 | // Swap encrypted text and tag 105 | // Miscreant requires tag in front of encrypted text 106 | let mut buf = BytesMut::with_capacity(input.len() + tag_size); 107 | buf.put_slice(&input[input.len() - tag_size..]); 108 | buf.put_slice(&input[..input.len() - tag_size]); 109 | 110 | let result = match self.cipher { 111 | MiscreantCryptoVariant::Aes128(ref mut cipher) => { 112 | cipher.decrypt_in_place(&self.nonce, b"", &mut buf) 113 | } 114 | MiscreantCryptoVariant::Aes256(ref mut cipher) => { 115 | cipher.decrypt_in_place(&self.nonce, b"", &mut buf) 116 | } 117 | }; 118 | 119 | result 120 | .map(|buf| { 121 | output.copy_from_slice(buf); 122 | increase_nonce(&mut self.nonce); 123 | }) 124 | .map_err(|_| { 125 | error!( 126 | "AEAD decrypt failed, nonce={:?}, input={:?}, tag={:?}, err: decrypt failure", 127 | ByteStr::new(&self.nonce), 128 | ByteStr::new(&input[..input.len() - tag_size]), 129 | ByteStr::new(&input[input.len() - tag_size..]) 130 | ); 131 | Error::AeadDecryptFailed 132 | }) 133 | } 134 | } 135 | 136 | #[cfg(test)] 137 | mod test { 138 | use super::*; 139 | 140 | fn test_miscreant(ct: CipherType) { 141 | let key = ct.bytes_to_key(b"PassWORD"); 142 | let message = b"message"; 143 | 144 | let iv = ct.gen_init_vec(); 145 | 146 | let mut enc = MiscreantCipher::new(ct, &key[..], &iv[..]); 147 | 148 | let mut encrypted_msg = vec![0u8; message.len() + 
ct.tag_size()]; 149 | enc.encrypt(message, &mut encrypted_msg); 150 | 151 | assert_ne!(message, &encrypted_msg[..]); 152 | 153 | let mut dec = MiscreantCipher::new(ct, &key[..], &iv[..]); 154 | let mut decrypted_msg = vec![0u8; message.len()]; 155 | dec.decrypt(&encrypted_msg[..], &mut decrypted_msg).unwrap(); 156 | 157 | assert_eq!(&decrypted_msg[..], message); 158 | } 159 | 160 | #[test] 161 | fn test_rust_crypto_cipher_aes_128_pmac_siv() { 162 | test_miscreant(CipherType::Aes128PmacSiv); 163 | } 164 | 165 | #[test] 166 | fn test_rust_crypto_cipher_aes_256_pmac_siv() { 167 | test_miscreant(CipherType::Aes256PmacSiv); 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /crypto/src/stream.rs: -------------------------------------------------------------------------------- 1 | //! Stream ciphers 2 | 3 | #[cfg(feature = "openssl")] 4 | use crate::openssl; 5 | #[cfg(feature = "sodium")] 6 | use crate::sodium; 7 | use crate::{ 8 | cipher::{CipherCategory, CipherResult, CipherType}, 9 | dummy, table, CryptoMode, 10 | }; 11 | 12 | use bytes::BufMut; 13 | 14 | /// Basic operation of Cipher, which is a Symmetric Cipher. 15 | /// 16 | /// The `update` method could be called multiple times, and the `finalize` method will 17 | /// encrypt the last block 18 | pub trait StreamCipher { 19 | fn update(&mut self, data: &[u8], out: &mut dyn BufMut) -> CipherResult<()>; 20 | fn finalize(&mut self, out: &mut dyn BufMut) -> CipherResult<()>; 21 | fn buffer_size(&self, data: &[u8]) -> usize; 22 | } 23 | 24 | /// Variant cipher which contains all possible stream ciphers 25 | pub type BoxStreamCipher = Box; 26 | 27 | /// Generate a specific Cipher with key and initialize vector 28 | #[allow(unused_variables)] 29 | pub fn new_stream(t: CipherType, key: &[u8], iv: &[u8], mode: CryptoMode) -> BoxStreamCipher { 30 | assert!( 31 | t.category() == CipherCategory::Stream, 32 | "only allow initializing with stream cipher" 33 | ); 34 | 35 | match t { 36 | CipherType::Table => Box::new(table::TableCipher::new(key, mode)), 37 | CipherType::Plain => Box::new(dummy::DummyCipher), 38 | 39 | #[cfg(feature = "sodium")] 40 | CipherType::ChaCha20 41 | | CipherType::Salsa20 42 | | CipherType::XSalsa20 43 | | CipherType::ChaCha20Ietf => Box::new(sodium::SodiumStreamCipher::new(t, key, iv)), 44 | 45 | #[cfg(feature = "rc4")] 46 | CipherType::Rc4Md5 => Box::new(rc4_md5::Rc4Md5Cipher::new(key, iv, mode)), 47 | 48 | #[cfg(feature = "aes-cfb")] 49 | CipherType::Aes128Cfb 50 | | CipherType::Aes128Cfb1 51 | | CipherType::Aes128Cfb8 52 | | CipherType::Aes128Cfb128 53 | | CipherType::Aes192Cfb 54 | | CipherType::Aes192Cfb1 55 | | CipherType::Aes192Cfb8 56 | | CipherType::Aes192Cfb128 57 | | CipherType::Aes256Cfb 58 | | CipherType::Aes256Cfb1 59 | | CipherType::Aes256Cfb8 60 | | CipherType::Aes256Cfb128 => Box::new(openssl::OpenSSLCipher::new(t, key, iv, mode)), 61 | 62 | #[cfg(feature = "aes-ctr")] 63 | CipherType::Aes128Ctr | CipherType::Aes192Ctr | CipherType::Aes256Ctr => { 64 | Box::new(openssl::OpenSSLCipher::new(t, key, iv, mode)) 65 | } 66 | 67 | #[cfg(feature = "camellia-cfb")] 68 | CipherType::Camellia128Cfb 69 | | CipherType::Camellia128Cfb1 70 | | CipherType::Camellia128Cfb8 71 | | CipherType::Camellia128Cfb128 72 | | CipherType::Camellia192Cfb 73 | | CipherType::Camellia192Cfb1 74 | | CipherType::Camellia192Cfb8 75 | | CipherType::Camellia192Cfb128 76 | | CipherType::Camellia256Cfb 77 | | CipherType::Camellia256Cfb1 78 | | CipherType::Camellia256Cfb8 79 | | 
CipherType::Camellia256Cfb128 => Box::new(openssl::OpenSSLCipher::new(t, key, iv, mode)), 80 | 81 | _ => unreachable!("{} is not a stream cipher", t), 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /crypto/src/table.rs: -------------------------------------------------------------------------------- 1 | //! This module implements the `table` cipher for fallback compatibility 2 | 3 | use std::io::Cursor; 4 | 5 | use crate::{ 6 | digest::{self, Digest, DigestType}, 7 | CipherResult, CryptoMode, StreamCipher, 8 | }; 9 | 10 | use byteorder::{LittleEndian, ReadBytesExt}; 11 | use bytes::{BufMut, BytesMut}; 12 | 13 | const TABLE_SIZE: usize = 256usize; 14 | 15 | /// Table cipher 16 | pub struct TableCipher { 17 | table: [u8; TABLE_SIZE], 18 | } 19 | 20 | impl TableCipher { 21 | pub fn new(key: &[u8], mode: CryptoMode) -> TableCipher { 22 | let mut md5_digest = digest::with_type(DigestType::Md5); 23 | md5_digest.update(key); 24 | let mut key_digest = BytesMut::with_capacity(md5_digest.digest_len()); 25 | md5_digest.digest_reset(&mut key_digest); 26 | 27 | let mut bufr = Cursor::new(&key_digest[..]); 28 | let a = bufr.read_u64::().unwrap(); 29 | 30 | let mut table = [0u64; TABLE_SIZE]; 31 | for (i, element) in table.iter_mut().enumerate() { 32 | *element = i as u64; 33 | } 34 | 35 | for i in 1..1024 { 36 | table.sort_by_key(|x| a % (*x + i)) 37 | } 38 | 39 | TableCipher { 40 | table: match mode { 41 | CryptoMode::Encrypt => { 42 | let mut t = [0u8; TABLE_SIZE]; 43 | for i in 0..TABLE_SIZE { 44 | t[i] = table[i] as u8; 45 | } 46 | t 47 | } 48 | CryptoMode::Decrypt => { 49 | let mut t = [0u8; TABLE_SIZE]; 50 | for (idx, &item) in table.iter().enumerate() { 51 | t[item as usize] = idx as u8; 52 | } 53 | t 54 | } 55 | }, 56 | } 57 | } 58 | 59 | fn process(&mut self, data: &[u8], out: &mut dyn BufMut) -> CipherResult<()> { 60 | let mut buf = BytesMut::with_capacity(self.buffer_size(data)); 61 | unsafe { 62 | buf.set_len(self.buffer_size(data)); // Set length 63 | } 64 | for (idx, d) in data.iter().enumerate() { 65 | buf[idx] = self.table[*d as usize]; 66 | } 67 | out.put_slice(&buf); 68 | Ok(()) 69 | } 70 | } 71 | 72 | impl StreamCipher for TableCipher { 73 | fn update(&mut self, data: &[u8], out: &mut dyn BufMut) -> CipherResult<()> { 74 | self.process(data, out) 75 | } 76 | 77 | fn finalize(&mut self, _: &mut dyn BufMut) -> CipherResult<()> { 78 | Ok(()) 79 | } 80 | 81 | fn buffer_size(&self, data: &[u8]) -> usize { 82 | data.len() 83 | } 84 | } 85 | 86 | #[test] 87 | fn test_table_cipher() { 88 | let message = "hello world"; 89 | let key = "keykeykk"; 90 | 91 | let mut enc = TableCipher::new(key.as_bytes(), CryptoMode::Encrypt); 92 | let mut dec = TableCipher::new(key.as_bytes(), CryptoMode::Decrypt); 93 | let mut encrypted_msg = Vec::new(); 94 | enc.update(message.as_bytes(), &mut encrypted_msg).unwrap(); 95 | let mut decrypted_msg = Vec::new(); 96 | dec.update(&encrypted_msg[..], &mut decrypted_msg).unwrap(); 97 | 98 | assert_eq!(&decrypted_msg[..], message.as_bytes()); 99 | } 100 | -------------------------------------------------------------------------------- /dnsserver/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dnsserver" 3 | version = "20250331.0.0" 4 | authors = ["gfreezy "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | async-std = { workspace = true } 11 | 
hermesdns = { path = "../hermesdns" } 12 | config = { path = "../config" } 13 | async-trait = { workspace = true } 14 | tracing = { workspace = true } 15 | async-std-resolver = { workspace = true } 16 | store = { path = "../store" } 17 | parking_lot = { workspace = true } 18 | 19 | [dev-dependencies] 20 | tempfile = { workspace = true } 21 | tracing-subscriber = { workspace = true } 22 | -------------------------------------------------------------------------------- /dnsserver/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod resolver; 2 | 3 | use async_std_resolver::AsyncStdResolver; 4 | use config::rule::ProxyRules; 5 | use hermesdns::DnsUdpServer; 6 | use resolver::RuleBasedDnsResolver; 7 | 8 | pub async fn create_dns_server( 9 | listens: Vec, 10 | bypass_direct: bool, 11 | rules: ProxyRules, 12 | async_resolver: AsyncStdResolver, 13 | ) -> (DnsUdpServer, RuleBasedDnsResolver) { 14 | let resolver = RuleBasedDnsResolver::new(bypass_direct, rules, async_resolver).await; 15 | let server = DnsUdpServer::new(listens, Box::new(resolver.clone())).await; 16 | (server, resolver) 17 | } 18 | 19 | #[cfg(test)] 20 | pub(crate) mod tests { 21 | use super::*; 22 | use async_std::io; 23 | use async_std::task; 24 | use async_std_resolver::config::{NameServerConfigGroup, ResolverConfig, ResolverOpts}; 25 | use hermesdns::{DnsClient, DnsNetworkClient, QueryType}; 26 | use std::time::Duration; 27 | 28 | const LOCAL_UDP_PORT: u16 = 6153; 29 | async fn get_ip(client: &DnsNetworkClient, host: &str) -> Option { 30 | let _a = client.get_failed_count(); 31 | let resp = io::timeout( 32 | Duration::from_secs(10), 33 | client.send_query(host, QueryType::A, ("127.0.0.1", LOCAL_UDP_PORT), true), 34 | ) 35 | .await 36 | .unwrap(); 37 | resp.get_random_a() 38 | } 39 | 40 | pub(crate) async fn new_resolver(ip: String, port: u16) -> AsyncStdResolver { 41 | let name_servers = 42 | NameServerConfigGroup::from_ips_clear(&[ip.parse().unwrap()], port, false); 43 | 44 | // Construct a new Resolver with default configuration options 45 | async_std_resolver::resolver( 46 | ResolverConfig::from_parts(None, Vec::new(), name_servers), 47 | ResolverOpts::default(), 48 | ) 49 | .await 50 | } 51 | 52 | #[test] 53 | fn test_resolve_ip() { 54 | store::Store::setup_global_for_test(); 55 | let dns = std::env::var("DNS").unwrap_or_else(|_| "114.114.114.114".to_string()); 56 | task::block_on(async { 57 | let resolver = new_resolver(dns, 53).await; 58 | let (server, resolver) = create_dns_server( 59 | vec![format!("0.0.0.0:{LOCAL_UDP_PORT}")], 60 | false, 61 | ProxyRules::new(vec![], None), 62 | resolver, 63 | ) 64 | .await; 65 | task::spawn(server.run_server()); 66 | task::sleep(Duration::from_secs(3)).await; 67 | let client = DnsNetworkClient::new(0, Duration::from_secs(50)).await; 68 | let ali_ip = get_ip(&client, "google.com").await; 69 | assert!(ali_ip.is_some()); 70 | let baidu_ip = get_ip(&client, "baidu.com").await; 71 | assert!(baidu_ip.is_some()); 72 | 73 | assert_eq!( 74 | resolver.lookup_host(&baidu_ip.unwrap()), 75 | Some("baidu.com".to_string()) 76 | ); 77 | assert_eq!( 78 | resolver.lookup_host(&ali_ip.unwrap()), 79 | Some("google.com".to_string()) 80 | ) 81 | }); 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /hermesdns/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hermesdns" 3 | version = "20250331.0.0" 4 | authors = ["gfreezy "] 5 | edition = 
"2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | rand = { workspace = true } 11 | chrono = { workspace = true } 12 | async-trait = { workspace = true } 13 | tracing = { workspace = true } 14 | tracing-futures = { workspace = true, features = ["std-future"] } 15 | async-std = { workspace = true, features = ["unstable"] } 16 | -------------------------------------------------------------------------------- /hermesdns/src/dns/authority.rs: -------------------------------------------------------------------------------- 1 | //! contains the data store for local zones 2 | #![allow(dead_code)] 3 | use crate::dns::buffer::{PacketBuffer, StreamPacketBuffer, VectorPacketBuffer}; 4 | use crate::dns::protocol::{DnsPacket, DnsRecord, QueryType, ResultCode, TransientTtl}; 5 | use async_std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; 6 | use std::collections::{BTreeMap, BTreeSet}; 7 | use std::fs::File; 8 | use std::io::{Result, Write}; 9 | use std::path::Path; 10 | 11 | #[derive(Clone, Debug, Default)] 12 | pub struct Zone { 13 | pub domain: String, 14 | pub m_name: String, 15 | pub r_name: String, 16 | pub serial: u32, 17 | pub refresh: u32, 18 | pub retry: u32, 19 | pub expire: u32, 20 | pub minimum: u32, 21 | pub records: BTreeSet, 22 | } 23 | 24 | impl Zone { 25 | pub fn new(domain: String, m_name: String, r_name: String) -> Zone { 26 | Zone { 27 | domain, 28 | m_name, 29 | r_name, 30 | serial: 0, 31 | refresh: 0, 32 | retry: 0, 33 | expire: 0, 34 | minimum: 0, 35 | records: BTreeSet::new(), 36 | } 37 | } 38 | 39 | pub fn add_record(&mut self, rec: &DnsRecord) -> bool { 40 | self.records.insert(rec.clone()) 41 | } 42 | 43 | pub fn delete_record(&mut self, rec: &DnsRecord) -> bool { 44 | self.records.remove(rec) 45 | } 46 | } 47 | 48 | #[derive(Default)] 49 | pub struct Zones { 50 | zones: BTreeMap, 51 | } 52 | 53 | impl<'a> Zones { 54 | pub fn new() -> Zones { 55 | Zones { 56 | zones: BTreeMap::new(), 57 | } 58 | } 59 | 60 | pub fn load(&mut self) -> Result<()> { 61 | let zones_dir = match Path::new("zones").read_dir() { 62 | Ok(dir) => dir, 63 | Err(_) => { 64 | return Ok(()); 65 | } 66 | }; 67 | 68 | for wrapped_filename in zones_dir { 69 | let filename = match wrapped_filename { 70 | Ok(x) => x, 71 | Err(_) => continue, 72 | }; 73 | 74 | let mut zone_file = match File::open(filename.path()) { 75 | Ok(x) => x, 76 | Err(_) => continue, 77 | }; 78 | 79 | let mut buffer = StreamPacketBuffer::new(&mut zone_file); 80 | 81 | let mut zone = Zone::new(String::new(), String::new(), String::new()); 82 | buffer.read_qname(&mut zone.domain)?; 83 | buffer.read_qname(&mut zone.m_name)?; 84 | buffer.read_qname(&mut zone.r_name)?; 85 | zone.serial = buffer.read_u32()?; 86 | zone.refresh = buffer.read_u32()?; 87 | zone.retry = buffer.read_u32()?; 88 | zone.expire = buffer.read_u32()?; 89 | zone.minimum = buffer.read_u32()?; 90 | 91 | let record_count = buffer.read_u32()?; 92 | 93 | for _ in 0..record_count { 94 | let rr = DnsRecord::read(&mut buffer)?; 95 | zone.add_record(&rr); 96 | } 97 | 98 | println!("Loaded zone {} with {} records", zone.domain, record_count); 99 | 100 | self.zones.insert(zone.domain.clone(), zone); 101 | } 102 | 103 | Ok(()) 104 | } 105 | 106 | pub fn save(&mut self) -> Result<()> { 107 | let zones_dir = Path::new("zones"); 108 | for zone in self.zones.values() { 109 | let filename = zones_dir.join(Path::new(&zone.domain)); 110 | let mut zone_file = match File::create(&filename) { 111 | Ok(x) 
=> x, 112 | Err(_) => { 113 | println!("Failed to save file {filename:?}"); 114 | continue; 115 | } 116 | }; 117 | 118 | let mut buffer = VectorPacketBuffer::new(); 119 | let _ = buffer.write_qname(&zone.domain); 120 | let _ = buffer.write_qname(&zone.m_name); 121 | let _ = buffer.write_qname(&zone.r_name); 122 | let _ = buffer.write_u32(zone.serial); 123 | let _ = buffer.write_u32(zone.refresh); 124 | let _ = buffer.write_u32(zone.retry); 125 | let _ = buffer.write_u32(zone.expire); 126 | let _ = buffer.write_u32(zone.minimum); 127 | let _ = buffer.write_u32(zone.records.len() as u32); 128 | 129 | for rec in &zone.records { 130 | let _ = rec.write(&mut buffer); 131 | } 132 | 133 | let _ = zone_file.write(&buffer.buffer[0..buffer.pos]); 134 | } 135 | 136 | Ok(()) 137 | } 138 | 139 | pub fn zones(&self) -> Vec<&Zone> { 140 | self.zones.values().collect() 141 | } 142 | 143 | pub fn add_zone(&mut self, zone: Zone) { 144 | self.zones.insert(zone.domain.clone(), zone); 145 | } 146 | 147 | pub fn get_zone(&'a self, domain: &str) -> Option<&'a Zone> { 148 | self.zones.get(domain) 149 | } 150 | 151 | pub fn get_zone_mut(&'a mut self, domain: &str) -> Option<&'a mut Zone> { 152 | self.zones.get_mut(domain) 153 | } 154 | } 155 | 156 | #[derive(Default)] 157 | pub struct Authority { 158 | zones: RwLock, 159 | } 160 | 161 | impl Authority { 162 | pub fn new() -> Authority { 163 | Authority { 164 | zones: RwLock::new(Zones::new()), 165 | } 166 | } 167 | 168 | pub async fn load(&self) -> Result<()> { 169 | let mut zones = self.zones.write().await; 170 | zones.load() 171 | } 172 | 173 | pub async fn query(&self, qname: &str, qtype: QueryType) -> Option { 174 | let zones = self.zones.read().await; 175 | 176 | let mut best_match = None; 177 | for zone in zones.zones() { 178 | if !qname.ends_with(&zone.domain) { 179 | continue; 180 | } 181 | 182 | if let Some((len, _)) = best_match { 183 | if len < zone.domain.len() { 184 | best_match = Some((zone.domain.len(), zone)); 185 | } 186 | } else { 187 | best_match = Some((zone.domain.len(), zone)); 188 | } 189 | } 190 | 191 | let zone = match best_match { 192 | Some((_, zone)) => zone, 193 | None => return None, 194 | }; 195 | 196 | let mut packet = DnsPacket::new(); 197 | packet.header.authoritative_answer = true; 198 | 199 | for rec in &zone.records { 200 | let domain = match rec.get_domain() { 201 | Some(x) => x, 202 | None => continue, 203 | }; 204 | 205 | if domain != qname { 206 | continue; 207 | } 208 | 209 | let rtype = rec.get_querytype(); 210 | if qtype == rtype || (qtype == QueryType::A && rtype == QueryType::CNAME) { 211 | packet.answers.push(rec.clone()); 212 | } 213 | } 214 | 215 | if packet.answers.is_empty() { 216 | packet.header.rescode = ResultCode::NXDOMAIN; 217 | 218 | packet.authorities.push(DnsRecord::SOA { 219 | domain: zone.domain.clone(), 220 | m_name: zone.m_name.clone(), 221 | r_name: zone.r_name.clone(), 222 | serial: zone.serial, 223 | refresh: zone.refresh, 224 | retry: zone.retry, 225 | expire: zone.expire, 226 | minimum: zone.minimum, 227 | ttl: TransientTtl(zone.minimum), 228 | }); 229 | } 230 | 231 | Some(packet) 232 | } 233 | 234 | pub async fn read(&self) -> RwLockReadGuard<'_, Zones> { 235 | self.zones.read().await 236 | } 237 | 238 | pub async fn write(&self) -> RwLockWriteGuard<'_, Zones> { 239 | self.zones.write().await 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /hermesdns/src/dns/context.rs: 
-------------------------------------------------------------------------------- 1 | //! The `ServerContext in this thread holds the common state across the server 2 | 3 | use crate::dns::resolve::DnsResolver; 4 | 5 | pub enum ResolveStrategy { 6 | Recursive, 7 | Forward { host: String, port: u16 }, 8 | } 9 | 10 | pub struct ServerContext { 11 | pub listens: Vec, 12 | pub resolver: Box, 13 | pub allow_recursive: bool, 14 | } 15 | 16 | impl ServerContext { 17 | pub async fn new( 18 | listens: Vec, 19 | resolver: Box, 20 | ) -> ServerContext { 21 | Self { 22 | listens, 23 | resolver, 24 | allow_recursive: true, 25 | } 26 | } 27 | } 28 | 29 | #[cfg(test)] 30 | pub mod tests { 31 | 32 | use std::sync::Arc; 33 | 34 | use crate::dns::client::tests::{DnsStubClient, StubCallback}; 35 | use crate::dns::resolve::{ForwardingDnsResolver, RecursiveDnsResolver}; 36 | 37 | use super::*; 38 | 39 | pub async fn create_test_context( 40 | callback: Box, 41 | resolve_strategy: ResolveStrategy, 42 | ) -> Arc { 43 | match resolve_strategy { 44 | ResolveStrategy::Recursive => Arc::new( 45 | ServerContext::new( 46 | vec!["127.0.0.1:53".into()], 47 | Box::new( 48 | RecursiveDnsResolver::new(true, Box::new(DnsStubClient::new(callback))) 49 | .await, 50 | ), 51 | ) 52 | .await, 53 | ), 54 | ResolveStrategy::Forward { host, port } => Arc::new( 55 | ServerContext::new( 56 | vec!["127.0.0.1:53".into()], 57 | Box::new( 58 | ForwardingDnsResolver::new( 59 | (host, port), 60 | true, 61 | Box::new(DnsStubClient::new(callback)), 62 | ) 63 | .await, 64 | ), 65 | ) 66 | .await, 67 | ), 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /hermesdns/src/dns/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
The dns module implements the DNS protocol and the related functions 2 | 3 | pub mod authority; 4 | pub mod buffer; 5 | pub mod cache; 6 | pub mod client; 7 | pub mod context; 8 | pub mod protocol; 9 | pub mod resolve; 10 | pub mod server; 11 | 12 | //mod netutil; 13 | -------------------------------------------------------------------------------- /hermesdns/src/hosts.rs: -------------------------------------------------------------------------------- 1 | use async_std::net::Ipv4Addr; 2 | use std::collections::HashMap; 3 | use std::error::Error; 4 | use std::fmt; 5 | use std::fmt::Display; 6 | use std::fs::File; 7 | use std::io::Read; 8 | 9 | const HOSTS_PATH: &str = "/etc/hosts"; 10 | 11 | #[derive(Debug, PartialEq, Eq)] 12 | pub struct Hosts { 13 | map: HashMap, 14 | } 15 | 16 | #[derive(Debug, PartialEq, Eq)] 17 | pub struct LoadHostError(String); 18 | 19 | impl Display for LoadHostError { 20 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 21 | write!(f, "{}", self.0) 22 | } 23 | } 24 | 25 | impl Error for LoadHostError {} 26 | 27 | impl From for LoadHostError { 28 | fn from(s: String) -> Self { 29 | LoadHostError(s) 30 | } 31 | } 32 | 33 | impl From<&'static str> for LoadHostError { 34 | fn from(s: &str) -> Self { 35 | LoadHostError(s.to_string()) 36 | } 37 | } 38 | 39 | impl Hosts { 40 | pub fn load() -> Result { 41 | let mut f = File::open(HOSTS_PATH).map_err(|_| LoadHostError::from("open /etc/hosts"))?; 42 | let mut content = String::new(); 43 | let _ = f 44 | .read_to_string(&mut content) 45 | .map_err(|_| LoadHostError::from("read /etc/hosts"))?; 46 | Hosts::parse(&content) 47 | } 48 | 49 | fn parse(content: &str) -> Result { 50 | let mut map = HashMap::new(); 51 | for line in content.lines() { 52 | let line = line.trim(); 53 | if line.starts_with('#') { 54 | continue; 55 | } 56 | let strip_comment = line.chars().take_while(|c| *c != '#').collect::(); 57 | let segments = strip_comment.split_whitespace().collect::>(); 58 | if segments.len() < 2 { 59 | continue; 60 | } 61 | let ip: Ipv4Addr = match segments[0].parse() { 62 | Ok(ip) => ip, 63 | Err(_) => continue, 64 | }; 65 | for host in &segments[1..] { 66 | map.insert((*host).to_string(), ip); 67 | } 68 | } 69 | Ok(Hosts { map }) 70 | } 71 | 72 | pub fn get(&self, domain: &str) -> Option { 73 | self.map.get(domain).cloned() 74 | } 75 | } 76 | 77 | #[cfg(test)] 78 | mod tests { 79 | use super::*; 80 | 81 | #[test] 82 | fn test_load_hosts() { 83 | let hosts = Hosts::parse( 84 | r#" 85 | ## 86 | # Host Database 87 | # 88 | # localhost is used to configure the loopback interface 89 | # when the system is booting. Do not change this entry. 
90 | ## 91 | 127.0.0.1 localhost proxyhost 92 | 255.255.255.255 broadcasthost 93 | ::1 localhost 94 | 95 | # Updated automatically when Wi-Fi ip address changed 96 | ### BEGIN GENERATED CONTENT 97 | 192.168.2.111 influxdb registry.xiachufang.com pypi.xiachufang.com 98 | ### END GENERATED CONTENT 99 | 100 | 127.0.0.1 kubernetes.docker.internal 101 | # 102 | # 103 | 127.0.0.1 devdb 104 | 127.0.0.1 board-db-01 board-db-02 board-db-03 board-db-04 105 | # Added by Docker Desktop 106 | # To allow the same kube context to work on the host and the container: 107 | 127.0.0.1 kubernetes.docker.internal 108 | # End of section 109 | "#, 110 | ) 111 | .unwrap(); 112 | let mut map = HashMap::new(); 113 | map.insert( 114 | "kubernetes.docker.internal".to_string(), 115 | "127.0.0.1".parse().unwrap(), 116 | ); 117 | map.insert( 118 | "registry.xiachufang.com".to_string(), 119 | "192.168.2.111".parse().unwrap(), 120 | ); 121 | map.insert( 122 | "broadcasthost".to_string(), 123 | "255.255.255.255".parse().unwrap(), 124 | ); 125 | map.insert("board-db-01".to_string(), "127.0.0.1".parse().unwrap()); 126 | map.insert("board-db-02".to_string(), "127.0.0.1".parse().unwrap()); 127 | map.insert("board-db-03".to_string(), "127.0.0.1".parse().unwrap()); 128 | map.insert("board-db-04".to_string(), "127.0.0.1".parse().unwrap()); 129 | map.insert("devdb".to_string(), "127.0.0.1".parse().unwrap()); 130 | map.insert("proxyhost".to_string(), "127.0.0.1".parse().unwrap()); 131 | map.insert("localhost".to_string(), "127.0.0.1".parse().unwrap()); 132 | map.insert( 133 | "pypi.xiachufang.com".to_string(), 134 | "192.168.2.111".parse().unwrap(), 135 | ); 136 | map.insert("influxdb".to_string(), "192.168.2.111".parse().unwrap()); 137 | let expected = Hosts { map }; 138 | assert_eq!(hosts.map.len(), expected.map.len()); 139 | assert_eq!(hosts.map, expected.map); 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /hermesdns/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::unreadable_literal)] 2 | 3 | mod dns; 4 | mod hosts; 5 | 6 | pub use dns::client::{DnsClient, DnsNetworkClient}; 7 | pub use dns::context::{ResolveStrategy, ServerContext}; 8 | pub use dns::protocol::{DnsPacket, DnsRecord, QueryType, TransientTtl}; 9 | pub use dns::resolve::{DnsResolver, ForwardingDnsResolver, RecursiveDnsResolver}; 10 | pub use dns::server::DnsUdpServer; 11 | pub use hosts::{Hosts, LoadHostError}; 12 | -------------------------------------------------------------------------------- /http_proxy_client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "http_proxy_client" 3 | version = "20250331.0.0" 4 | authors = ["Alex.F "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | async-std = { workspace = true } 11 | config = { path = "../config" } 12 | base64 = { workspace = true } 13 | async-tls = { workspace = true } 14 | parking_lot = { workspace = true } 15 | -------------------------------------------------------------------------------- /http_proxy_client/src/http.rs: -------------------------------------------------------------------------------- 1 | use async_std::io::prelude::{Read, ReadExt, Write, WriteExt}; 2 | use async_std::net::{SocketAddr, TcpStream}; 3 | use async_std::task::{Context, Poll}; 4 | use config::Address; 5 | use std::io::{ErrorKind, Result}; 
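// --- Editor's note (illustrative, not part of http.rs) ---
// `HttpProxyTcpStream::connect` below performs a plain-text HTTP CONNECT
// handshake.  For addr = example.com:443 with credentials user:pass, the
// request written to the proxy looks like this (each line ends with \r\n and
// the request ends with an empty line):
//
//   CONNECT example.com:443 HTTP/1.1
//   Proxy-Authorization: Basic dXNlcjpwYXNz
//   Host: example.com:443
//
// The reply is read once into a 1500-byte buffer and only its status line is
// checked: anything starting with "HTTP/1.1 2" counts as success, after which
// the TCP stream is tunnelled verbatim.
// --- end note ---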
6 | use std::pin::Pin; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct HttpProxyTcpStream { 10 | conn: TcpStream, 11 | } 12 | 13 | impl HttpProxyTcpStream { 14 | pub async fn connect( 15 | proxy_server: SocketAddr, 16 | addr: Address, 17 | username: Option<&str>, 18 | password: Option<&str>, 19 | ) -> Result { 20 | let mut conn = TcpStream::connect(proxy_server).await?; 21 | let authorization = match (username, password) { 22 | (Some(username), Some(password)) => base64::encode(format!("{username}:{password}")), 23 | _ => "".to_string(), 24 | }; 25 | let mut req_buf = vec![format!("CONNECT {addr} HTTP/1.1")]; 26 | if !authorization.is_empty() { 27 | req_buf.push(format!("Proxy-Authorization: Basic {authorization}")); 28 | } 29 | req_buf.push(format!("Host: {addr}")); 30 | req_buf.push("\r\n".to_string()); 31 | let req: String = req_buf.join("\r\n"); 32 | conn.write_all(req.as_bytes()).await?; 33 | let mut buf = vec![0; 1500]; 34 | let size = conn.read(&mut buf).await?; 35 | let resp = String::from_utf8_lossy(&buf[..size]); 36 | if !resp.trim().starts_with("HTTP/1.1 2") { 37 | return Err(ErrorKind::NotConnected.into()); 38 | } 39 | Ok(HttpProxyTcpStream { conn }) 40 | } 41 | } 42 | 43 | impl Read for HttpProxyTcpStream { 44 | fn poll_read( 45 | self: Pin<&mut Self>, 46 | cx: &mut Context<'_>, 47 | buf: &mut [u8], 48 | ) -> Poll> { 49 | Pin::new(&mut &self.conn).poll_read(cx, buf) 50 | } 51 | } 52 | 53 | impl Write for HttpProxyTcpStream { 54 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 55 | Pin::new(&mut &self.conn).poll_write(cx, buf) 56 | } 57 | 58 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 59 | Pin::new(&mut &self.conn).poll_flush(cx) 60 | } 61 | 62 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 63 | Pin::new(&mut &self.conn).poll_close(cx) 64 | } 65 | } 66 | 67 | impl Read for &HttpProxyTcpStream { 68 | fn poll_read( 69 | self: Pin<&mut Self>, 70 | cx: &mut Context<'_>, 71 | buf: &mut [u8], 72 | ) -> Poll> { 73 | Pin::new(&mut &self.conn).poll_read(cx, buf) 74 | } 75 | } 76 | 77 | impl Write for &HttpProxyTcpStream { 78 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 79 | Pin::new(&mut &self.conn).poll_write(cx, buf) 80 | } 81 | 82 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 83 | Pin::new(&mut &self.conn).poll_flush(cx) 84 | } 85 | 86 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 87 | Pin::new(&mut &self.conn).poll_close(cx) 88 | } 89 | } 90 | // 91 | // #[cfg(test)] 92 | // mod tests { 93 | // use super::*; 94 | // use async_std::io::prelude::{ReadExt, WriteExt}; 95 | // use async_std::task::block_on; 96 | // use std::net::ToSocketAddrs; 97 | // 98 | // #[test] 99 | // fn test_req_baidu() -> Result<()> { 100 | // block_on(async { 101 | // let proxy_server = ""; 102 | // let username = Some(""); 103 | // let password = Some(""); 104 | // let mut conn = HttpProxyTcpStream::connect( 105 | // proxy_server.to_socket_addrs().unwrap().next().unwrap(), 106 | // Address::DomainNameAddress("twitter.com".to_string(), 80), 107 | // username, 108 | // password, 109 | // ) 110 | // .await?; 111 | // conn.write_all(r#"GET / HTTP/1.1\r\nHost: twitter.com\r\n\r\n"#.as_bytes()) 112 | // .await?; 113 | // let mut resp = vec![0; 1024]; 114 | // let size = conn.read(&mut resp).await?; 115 | // let resp_text = String::from_utf8_lossy(&resp[..size]).to_string(); 116 | // assert!(dbg!(resp_text).contains("HTTP/1.1")); 117 | // Ok(()) 
118 | // }) 119 | // } 120 | // } 121 | -------------------------------------------------------------------------------- /http_proxy_client/src/https.rs: -------------------------------------------------------------------------------- 1 | use async_std::io::prelude::{Read, ReadExt, Write, WriteExt}; 2 | use async_std::net::{SocketAddr, TcpStream}; 3 | use async_std::task::{Context, Poll}; 4 | use async_tls::client::TlsStream; 5 | use async_tls::TlsConnector; 6 | use config::Address; 7 | use parking_lot::Mutex; 8 | use std::io::Error; 9 | use std::io::{ErrorKind, Result}; 10 | use std::pin::Pin; 11 | use std::sync::Arc; 12 | 13 | #[derive(Debug, Clone)] 14 | pub struct HttpsProxyTcpStream { 15 | conn: Arc>>, 16 | } 17 | 18 | impl HttpsProxyTcpStream { 19 | pub async fn connect( 20 | proxy_server: SocketAddr, 21 | proxy_server_domain: &str, 22 | addr: Address, 23 | username: Option<&str>, 24 | password: Option<&str>, 25 | ) -> Result { 26 | let connector = TlsConnector::default(); 27 | let stream = TcpStream::connect(proxy_server).await?; 28 | let mut conn = connector.connect(proxy_server_domain, stream).await?; 29 | let authorization = match (username, password) { 30 | (Some(username), Some(password)) => base64::encode(format!("{username}:{password}")), 31 | _ => "".to_string(), 32 | }; 33 | let mut req_buf = vec![format!("CONNECT {addr} HTTP/1.1")]; 34 | if !authorization.is_empty() { 35 | req_buf.push(format!("Proxy-Authorization: basic {authorization}")); 36 | } 37 | req_buf.push(format!("Host: {addr}")); 38 | req_buf.push("\r\n".to_string()); 39 | let req: String = req_buf.join("\r\n"); 40 | conn.write_all(req.as_bytes()).await?; 41 | let mut buf = vec![0; 1500]; 42 | let size = conn.read(&mut buf).await?; 43 | let resp = String::from_utf8_lossy(&buf[..size]); 44 | if !resp.trim().starts_with("HTTP/1.1 2") { 45 | return Err(Error::new(ErrorKind::NotConnected, resp)); 46 | } 47 | Ok(HttpsProxyTcpStream { 48 | conn: Arc::new(Mutex::new(conn)), 49 | }) 50 | } 51 | } 52 | 53 | impl Read for HttpsProxyTcpStream { 54 | fn poll_read( 55 | self: Pin<&mut Self>, 56 | cx: &mut Context<'_>, 57 | buf: &mut [u8], 58 | ) -> Poll> { 59 | Pin::new(&mut &*self).poll_read(cx, buf) 60 | } 61 | } 62 | 63 | impl Write for HttpsProxyTcpStream { 64 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 65 | Pin::new(&mut &*self).poll_write(cx, buf) 66 | } 67 | 68 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 69 | Pin::new(&mut &*self).poll_flush(cx) 70 | } 71 | 72 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 73 | Pin::new(&mut &*self).poll_close(cx) 74 | } 75 | } 76 | 77 | impl Read for &HttpsProxyTcpStream { 78 | fn poll_read( 79 | self: Pin<&mut Self>, 80 | cx: &mut Context<'_>, 81 | buf: &mut [u8], 82 | ) -> Poll> { 83 | Pin::new(&mut *self.conn.lock()).poll_read(cx, buf) 84 | } 85 | } 86 | 87 | impl Write for &HttpsProxyTcpStream { 88 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 89 | Pin::new(&mut *self.conn.lock()).poll_write(cx, buf) 90 | } 91 | 92 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 93 | Pin::new(&mut *self.conn.lock()).poll_flush(cx) 94 | } 95 | 96 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 97 | Pin::new(&mut *self.conn.lock()).poll_close(cx) 98 | } 99 | } 100 | // 101 | // #[cfg(test)] 102 | // mod tests { 103 | // use super::*; 104 | // use async_std::io::prelude::{ReadExt, WriteExt}; 105 | // use 
async_std::task::block_on; 106 | // use std::net::ToSocketAddrs; 107 | // 108 | // #[test] 109 | // fn test_req_twitter() -> Result<()> { 110 | // block_on(async { 111 | // let proxy_domain = ""; 112 | // let port = 443; 113 | // let proxy_server = format!("{}:{}", proxy_domain, port); 114 | // let username = Some(""); 115 | // let password = Some(""); 116 | // let target_host = "twitter.com"; 117 | // let stream = HttpsProxyTcpStream::connect( 118 | // proxy_server.to_socket_addrs().unwrap().next().unwrap(), 119 | // proxy_domain.to_string(), 120 | // Address::DomainNameAddress(target_host.to_string(), 443), 121 | // username, 122 | // password, 123 | // ) 124 | // .await 125 | // .expect("connect proxy error"); 126 | // 127 | // let connector: TlsConnector = TlsConnector::default(); 128 | // 129 | // let mut conn = connector 130 | // .connect(target_host, stream) 131 | // .await 132 | // .expect("connect proxy domain"); 133 | // 134 | // conn.write_all( 135 | // format!( 136 | // "GET / HTTP/1.1\r\nHost: {}\r\nUser-Agent: curl/7.64.1\r\nAccept: */*\r\n\r\n", 137 | // target_host 138 | // ) 139 | // .as_bytes(), 140 | // ) 141 | // .await?; 142 | // let mut resp = vec![0; 1024]; 143 | // let size = conn.read(&mut resp).await?; 144 | // 145 | // let resp_text = String::from_utf8_lossy(&resp[..size]).to_string(); 146 | // eprintln!("{}", &resp_text); 147 | // assert!(resp_text.contains("HTTP/1.1")); 148 | // Ok(()) 149 | // }) 150 | // } 151 | // } 152 | -------------------------------------------------------------------------------- /http_proxy_client/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod http; 2 | mod https; 3 | 4 | pub use http::HttpProxyTcpStream; 5 | pub use https::HttpsProxyTcpStream; 6 | -------------------------------------------------------------------------------- /sample_config.yml: -------------------------------------------------------------------------------- 1 | verbose: false 2 | dns_start_ip: 11.0.0.10 3 | # 可以指定多个 DNS 服务器,如果不指定则使用系统默认的 DNS 服务器。一般最好指定,否则 Wi-Fi 切换的时候可能会出现 DNS 服务器无法访问的问题。 4 | # 一般 DHCP 获取 IP 的时候会自动获取 DNS 服务器,切换 Wi-Fi 的时候,DNS 服务器也会发生变化。 5 | dns_servers: 6 | - 223.5.5.5:53 7 | - 114.114.114.114:53 8 | - tcp://114.114.114.114:53 9 | dns_timeout: 1s 10 | # 直连的域名直接返回真实IP,不走tun 11 | tun_bypass_direct: true 12 | # redir 模式使用 iptable 的 redirect 功能: iptables -t nat -A PREROUTING -d 11.0.0.0/16 -p tcp -j REDIRECT --to-ports 1300 13 | # redir 模式下只支持 tcp 流量。默认使用 tun 模式。特殊设备不支持 tun 的情况,可以使用 redir 模式。 14 | redir_mode: false 15 | # 队列数量,linux 有效。至少为 1,queue_number 越大,性能越好。一般 2 就可以跑满 1Gbps 带宽 16 | queue_number: 2 17 | # 每个队列的线程数量,主要工作是处理 tun 数据转发时对数据包进行 checksum 计算。一般 2-3 就可以处理一个 queue 的转发。 18 | threads_per_queue: 3 19 | tun_name: utun4 20 | tun_ip: 11.0.0.1 21 | tun_cidr: 11.0.0.0/16 22 | # 数据库路径,如果使用相对路径,相对于可执行文件的路径。默认会搜索可执行文件同级目录下的 seeker.sqlite 文件 23 | db_path: seeker.sqlite 24 | dns_listens: 25 | # 如果本机没有其他程序监听 53 端口,可以使用 0.0.0.0 26 | - 0.0.0.0:53 27 | # Ubuntu 等新版本的系统默认使用 systemd-resolved,已经监听了 127.0.0.53:53,所以没法直接监听 0.0.0.0:53。 28 | # 这种情况下可以监听 127.0.0.1:53。如果使用 docker,再监听 172.17.0.1:53。如果局域网内别的机器需要走代理,可以监听 29 | # 192.168.0.xx:53(本机的局域网 IP) 30 | - 127.0.0.1:53 31 | - 172.17.0.1:53 32 | - 192.168.0.3:53 33 | # 是否支持局域网内其他机器走代理 34 | gateway_mode: true 35 | # probe_timeout 时间内如果可以建立 TCP 连接则直连(443端口会额外建立 SSL 连接),否则走代理。不要调的太低,国内有些网站会有很长的 SSL 握手时间。 36 | # 如果目标端口为 443,TCP 连接的超时时间为 probe_timeout, SSL 连接超时时间也为 probe_timeout,总的超时时间为 probe_timeout * 2; 37 | # 如果目标端口不为 443,TCP 连接的超时时间为 probe_timeout。 38 | 
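# Editor's note (worked example): with probe_timeout: 200ms, a connection to a
# :443 target gets up to 200ms for the TCP probe plus up to 200ms for the SSL
# probe (about 400ms in total) before falling back to the proxy; for any other
# port only the single 200ms TCP probe applies.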
probe_timeout: 200ms 39 | # ping 超时时间 40 | ping_timeout: 2s 41 | # 连接超时时间 42 | connect_timeout: 2s 43 | # 读取超时时间,多久没有数据读取则认为超时 44 | read_timeout: 300s 45 | # 写入超时时间,多久没有数据写入则认为超时 46 | write_timeout: 300s 47 | # ss 服务器重试次数,到达重试次数后会自动选择下一个最快的服务器 48 | max_connect_errors: 2 49 | # geoip 数据库路径,如果使用相对路径,相对于可执行文件的路径。默认会搜索可执行文件同级目录下的 geoip.mmdb 文件 50 | # 可以从 https://github.com/Hackl0us/GeoIP2-CN 下载 mmdb 格式的文件 51 | geo_ip: path/to/geoip.mmdb 52 | # 测试 ping 的默认 URL 列表,用于测试代理的连通性。 53 | # 连接成功即认为代理可用,不判断 HTTP 返回码。 54 | ping_urls: 55 | - host: www.facebook.com 56 | port: 80 57 | path: / 58 | - host: www.youtube.com 59 | port: 80 60 | path: / 61 | - host: twitter.com 62 | port: 80 63 | path: / 64 | 65 | # ss 订阅地址,启动时自动拉群配置,并将配置的服务器地址自动加入服务器列表 66 | # 可以通过 `./seeker -c config.yml --encrypt --key password` 命令来生成加密后的配置。 67 | # 通过 `./seeker --config-url http://addr-to-ss-subscribe-url.com --key password` 命令来读取远程配置启动。 68 | remote_config_urls: 69 | - https://addr-to-ss-subscribe-url.com 70 | 71 | # 服务器列表 72 | servers: 73 | - name: server-http 74 | addr: 127.0.0.1:1087 # 替换成 http 代理的地址 75 | username: 76 | password: 77 | # 协议,Http 或 Https 或 Socks5 或 Shadowsocks 78 | protocol: Http 79 | - name: server-ss1 80 | addr: domain-to-ss-server.com # 替换成 ss 服务器的地址 81 | method: chacha20-ietf 82 | password: password 83 | protocol: Shadowsocks 84 | - name: server-ss2 85 | addr: 128.113.23.12:12312 86 | method: chacha20-ietf 87 | password: password 88 | protocol: Shadowsocks 89 | obfs: # 不设置默认不使用 obfs 90 | mode: Http # 目前只支持 Http 91 | host: c61be5399e.microsoft.com 92 | 93 | # 代理组,可以指定多个代理组,每个代理组可以指定多个代理服务器。 94 | proxy_groups: 95 | # 代理组名称,名称不能重复。用来在规则中引用:PROXY(proxy-group-1) PROBE(proxy-group-1) 96 | - name: proxy-group-1 97 | # 代理组中的代理服务器列表,名称必须是 servers 列表中定义的服务器名称 98 | proxies: 99 | - server-http 100 | - server-ss1 101 | # 测试 ping 的 URL 列表,用于测试代理的连通性。连接成功即认为代理可用,不判断 HTTP 返回码。 102 | # 如果不设置,默认使用顶层 ping_urls 列表中的 URL 进行测试。 103 | ping_urls: 104 | - host: github.com 105 | port: 443 106 | path: / 107 | # ping 超时时间,如果不设置,默认使用顶层 ping_timeout 配置。 108 | ping_timeout: 1s 109 | 110 | - name: proxy-group-2 111 | proxies: 112 | - server-ss2 113 | ping_urls: 114 | - host: google.com 115 | port: 443 116 | path: / 117 | ping_timeout: 3s 118 | 119 | # 规则,可以指定多个规则。优先级从上到下依次降低。 120 | # 规则格式:规则类型,规则值,规则动作 121 | # 122 | # 规则类型: 123 | # DOMAIN: 完整匹配域名 124 | # DOMAIN-SUFFIX: 域名后缀匹配 125 | # DOMAIN-KEYWORD: 域名关键字匹配 126 | # GEOIP: geoip 匹配 127 | # MATCH: 默认匹配 128 | # 129 | # 规则动作: 130 | # DIRECT: 直接访问 131 | # REJECT: 直接拒绝 132 | # PROXY(proxy-group-name): 使用代理组 proxy-group-name 中自动选择最快的代理服务器 133 | # PROBE(proxy-group-name): 同时发起两个连接,一个使用代理,一个直接连接。哪一个先连接成功则使用哪一个。 134 | # 并且缓存每一个域名的 PROBE 结果,下次先用缓存的结果连接,同时再重新发起探测。 135 | rules: 136 | # 完整匹配域名,直接访问 137 | - 'DOMAIN,audio-ssl.itunes.apple.com,DIRECT' 138 | # 完整匹配域名,直接拒绝 139 | - 'DOMAIN,gspe1-ssl.ls.apple.com,REJECT' 140 | # 域名后缀匹配,直接访问 141 | - 'DOMAIN-SUFFIX,aaplimg.com,DIRECT' 142 | - 'DOMAIN-SUFFIX,apple.co,DIRECT' 143 | # 域名关键字匹配,使用代理组 proxy-group-1 144 | - 'DOMAIN-KEYWORD,bbcfmt,PROXY(proxy-group-1)' 145 | - 'DOMAIN-KEYWORD,uk-live,PROXY(proxy-group-1)' 146 | # 域名后缀匹配,直接访问 147 | - 'DOMAIN-SUFFIX,snssdk.com,DIRECT' 148 | # 域名后缀匹配,使用代理组 proxy-group-2 149 | - 'DOMAIN-SUFFIX,toutiao.com,PROBE(proxy-group-2)' 150 | # ip 段匹配,使用代理组 proxy-group-1 151 | - 'IP-CIDR,183.23.0.0/16,PROXY(proxy-group-1)' 152 | # geoip 匹配,直接访问 153 | - 'GEOIP,CN,DIRECT' 154 | # 默认匹配,使用代理组 proxy-group-2 155 | - 'MATCH,PROBE(proxy-group-2)' 156 | -------------------------------------------------------------------------------- 
/seeker/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "seeker" 3 | version = "20250331.0.0" 4 | authors = ["gfreezy "] 5 | edition = "2024" 6 | repository = "https://github.com/gfreezy/seeker" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | tracing = { workspace = true, features = ["attributes"] } 12 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 13 | tracing-futures = { workspace = true, features = ["std-future"] } 14 | tracing-chrome = { workspace = true, optional = true } 15 | config = { path = "../config" } 16 | dnsserver = { path = "../dnsserver" } 17 | ssclient = { path = "../ssclient" } 18 | socks5_client = { path = "../socks5_client" } 19 | http_proxy_client = { path = "../http_proxy_client" } 20 | sysconfig = { path = "../sysconfig" } 21 | tun_nat = { path = "../tun_nat" } 22 | file-rotate = { workspace = true } 23 | async-std = { workspace = true, features = ["attributes"] } 24 | async-tls = { workspace = true } 25 | parking_lot = { workspace = true, features = ["deadlock_detection"] } 26 | ctrlc = { workspace = true, features = ["termination"] } 27 | libc = { workspace = true } 28 | futures-util = { workspace = true } 29 | clap = { workspace = true, features = ["derive"] } 30 | async-std-resolver = { workspace = true } 31 | ureq = { workspace = true, features = ["json"] } 32 | crypto = { path = "../crypto" } 33 | bytes = { workspace = true } 34 | base64 = { workspace = true } 35 | anyhow = { workspace = true } 36 | tcp_connection = { path = "../tcp_connection" } 37 | url = { workspace = true } 38 | store = { path = "../store" } 39 | nix = { workspace = true, features = ["socket", "net"] } 40 | os_socketaddr = { workspace = true } 41 | notify-debouncer-mini = { workspace = true } 42 | 43 | [dev-dependencies] 44 | tempfile = { workspace = true } 45 | 46 | [features] 47 | tracing-chrome = ["dep:tracing-chrome"] 48 | 49 | 50 | [package.metadata.bundle] 51 | name = "seeker" 52 | identifier = "io.allsunday.seeker" 53 | -------------------------------------------------------------------------------- /seeker/src/config_encryptor.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use bytes::BytesMut; 3 | use crypto::CipherType; 4 | use ssclient::{decrypt_payload, encrypt_payload}; 5 | use std::fs::File; 6 | use std::io::Read; 7 | 8 | pub fn decrypt_config( 9 | mut reader: R, 10 | cipher_type: CipherType, 11 | decrypt_key: &str, 12 | ) -> anyhow::Result> { 13 | let mut content = String::new(); 14 | let _size = reader 15 | .read_to_string(&mut content) 16 | .context("Read http response error")?; 17 | let b64decoded = base64::decode(content.trim().as_bytes()).context("base64 decode error")?; 18 | let mut output = BytesMut::new(); 19 | let size = decrypt_payload( 20 | cipher_type, 21 | &cipher_type.bytes_to_key(decrypt_key.as_bytes()), 22 | &b64decoded, 23 | &mut output, 24 | ) 25 | .context("decrypt payload error")?; 26 | Ok(output[..size].to_vec()) 27 | } 28 | 29 | pub fn encrypt_config( 30 | mut reader: R, 31 | cipher_type: CipherType, 32 | encrypt_key: &str, 33 | ) -> anyhow::Result { 34 | let mut buf = vec![]; 35 | let size = reader.read_to_end(&mut buf)?; 36 | 37 | let mut output = BytesMut::new(); 38 | let size = encrypt_payload( 39 | cipher_type, 40 | &cipher_type.bytes_to_key(encrypt_key.as_bytes()), 41 | &buf[..size], 42 | &mut output, 43 | 
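// --- Editor's note (illustrative, not part of config_encryptor.rs) ---
// Shape of the encrypted config: plaintext YAML -> encrypt_payload with the
// chosen cipher (encrypt_config_file below fixes it to ChaCha20-IETF) ->
// base64 string, so the result can be hosted as a plain text file and later
// fetched with `seeker --config-url <url> --key <key>`, which runs
// decrypt_config to reverse both steps.
// --- end note ---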
)?; 44 | 45 | let content = base64::encode(&output[..size]); 46 | 47 | Ok(content) 48 | } 49 | 50 | pub fn encrypt_config_file( 51 | path: Option<&str>, 52 | encrypt_key: Option<&str>, 53 | ) -> anyhow::Result { 54 | let (Some(path), Some(key)) = (path, encrypt_key) else { 55 | return Err(anyhow::anyhow!("path and encrypt_key must be provided")); 56 | }; 57 | let file = File::open(path).context("Open config error")?; 58 | encrypt_config(file, CipherType::ChaCha20Ietf, key).context("Encrypt config error") 59 | } 60 | 61 | #[cfg(test)] 62 | mod tests { 63 | use crate::config_encryptor::{decrypt_config, encrypt_config}; 64 | use crypto::CipherType; 65 | 66 | #[test] 67 | fn test_base64() { 68 | let content = "halsdjflwefjklasdjflkasdjf"; 69 | let e = encrypt_config(content.as_bytes(), CipherType::ChaCha20Ietf, "test").unwrap(); 70 | let d = decrypt_config(e.as_bytes(), CipherType::ChaCha20Ietf, "test").unwrap(); 71 | assert_eq!(content, String::from_utf8(d).unwrap()); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /seeker/src/config_watcher.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | use std::time::Duration; 3 | 4 | use notify_debouncer_mini::notify::{RecommendedWatcher, RecursiveMode}; 5 | use notify_debouncer_mini::{DebounceEventHandler, Debouncer, new_debouncer}; 6 | 7 | pub fn watch_config( 8 | config_path: PathBuf, 9 | action: F, 10 | ) -> Debouncer { 11 | let mut debouncer = 12 | new_debouncer(Duration::from_secs(1), action).expect("create debouncer error"); 13 | 14 | debouncer 15 | .watcher() 16 | .watch(&config_path, RecursiveMode::Recursive) 17 | .expect("watch path error"); 18 | 19 | debouncer 20 | } 21 | -------------------------------------------------------------------------------- /seeker/src/dns_client.rs: -------------------------------------------------------------------------------- 1 | use async_std_resolver::config::{ 2 | NameServerConfig, NameServerConfigGroup, Protocol, ResolverConfig, ResolverOpts, 3 | }; 4 | use async_std_resolver::{AsyncStdResolver, resolver}; 5 | use config::{Address, DnsServerAddr}; 6 | use std::io::{Error, ErrorKind, Result}; 7 | use std::net::IpAddr; 8 | use std::net::SocketAddr; 9 | use std::time::Duration; 10 | 11 | #[derive(Clone)] 12 | pub struct DnsClient { 13 | resolver: AsyncStdResolver, 14 | } 15 | 16 | impl DnsClient { 17 | pub async fn new(dns_servers: &[DnsServerAddr], timeout: Duration) -> Self { 18 | let mut name_servers = NameServerConfigGroup::with_capacity(dns_servers.len()); 19 | 20 | for addr in dns_servers { 21 | match addr { 22 | DnsServerAddr::UdpSocketAddr(addr) => { 23 | let udp = NameServerConfig { 24 | socket_addr: *addr, 25 | protocol: Protocol::Udp, 26 | tls_dns_name: None, 27 | trust_negative_responses: false, 28 | bind_addr: None, 29 | }; 30 | name_servers.push(udp); 31 | } 32 | DnsServerAddr::TcpSocketAddr(addr) => { 33 | if !["tcp", "tls"].contains(&addr.scheme()) { 34 | panic!("Invalid dns server address") 35 | } 36 | let tcp = NameServerConfig { 37 | socket_addr: format!("{}:{}", addr.host().unwrap(), addr.port().unwrap()) 38 | .parse() 39 | .unwrap(), 40 | protocol: Protocol::Tcp, 41 | tls_dns_name: None, 42 | trust_negative_responses: false, 43 | bind_addr: None, 44 | }; 45 | name_servers.push(tcp); 46 | } 47 | } 48 | } 49 | 50 | let num_concurrent_reqs = name_servers.len(); 51 | 52 | // Construct a new Resolver with default configuration options 53 | let resolver = resolver( 54 | 
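// --- Editor's note (illustrative, not part of dns_client.rs) ---
// The resolver below is built from the upstream list assembled above:
// `timeout` bounds each individual query with the value passed to
// `DnsClient::new`, and `num_concurrent_reqs` equals the number of
// configured servers so every upstream can be queried in parallel rather
// than strictly one after another.
// --- end note ---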
ResolverConfig::from_parts(None, Vec::new(), name_servers), 55 | { 56 | let mut opts = ResolverOpts::default(); 57 | opts.timeout = timeout; 58 | opts.num_concurrent_reqs = num_concurrent_reqs; 59 | opts 60 | }, 61 | ) 62 | .await; 63 | 64 | DnsClient { resolver } 65 | } 66 | 67 | pub fn resolver(&self) -> AsyncStdResolver { 68 | self.resolver.clone() 69 | } 70 | pub async fn lookup(&self, domain: &str) -> Result { 71 | let response = 72 | self.resolver.lookup_ip(domain).await.map_err(|e| { 73 | Error::new(ErrorKind::NotFound, format!("{domain} not resolved.\n{e}")) 74 | })?; 75 | response.iter().next().ok_or_else(|| { 76 | Error::new( 77 | ErrorKind::NotFound, 78 | format!("no response returned for {domain}."), 79 | ) 80 | }) 81 | } 82 | 83 | #[tracing::instrument(skip(self))] 84 | pub async fn lookup_address(&self, addr: &Address) -> Result { 85 | match addr { 86 | Address::SocketAddress(a) => Ok(*a), 87 | Address::DomainNameAddress(domain, port) => { 88 | let ip = self.lookup(domain).await?; 89 | Ok(SocketAddr::new(ip, *port)) 90 | } 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /seeker/src/logger.rs: -------------------------------------------------------------------------------- 1 | use file_rotate::{FileRotate, suffix::AppendTimestamp}; 2 | use std::io; 3 | use std::path::PathBuf; 4 | use std::sync::{Arc, Mutex}; 5 | #[cfg(feature = "tracing-chrome")] 6 | use tracing_chrome::{ChromeLayerBuilder, FlushGuard}; 7 | use tracing_subscriber::prelude::*; 8 | use tracing_subscriber::{EnvFilter, Layer, Registry}; 9 | 10 | #[derive(Clone)] 11 | struct TracingWriter { 12 | file_rotate: Arc>>, 13 | } 14 | 15 | impl TracingWriter { 16 | fn new(file_rotate: Arc>>) -> Self { 17 | TracingWriter { file_rotate } 18 | } 19 | } 20 | 21 | impl io::Write for TracingWriter { 22 | fn write(&mut self, buf: &[u8]) -> io::Result { 23 | let mut guard = self.file_rotate.lock().unwrap(); 24 | guard.write(buf) 25 | } 26 | 27 | fn flush(&mut self) -> io::Result<()> { 28 | let mut guard = self.file_rotate.lock().unwrap(); 29 | guard.flush() 30 | } 31 | } 32 | 33 | pub(crate) struct LoggerGuard { 34 | #[cfg(feature = "tracing-chrome")] 35 | _chrome_layer_guard: Option, 36 | } 37 | 38 | pub(crate) fn setup_logger(log_path: Option<&str>, trace: bool) -> anyhow::Result { 39 | let env_filter = EnvFilter::new("seeker=trace") 40 | .add_directive("dnsserver=debug".parse()?) 41 | .add_directive("sysconfig=info".parse()?) 42 | .add_directive("config=info".parse()?) 
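// --- Editor's note (illustrative, not part of logger.rs) ---
// The filter is assembled from an explicit directive list (seeker=trace plus
// info/debug caps for the supporting crates, ending with tun_nat below), so
// RUST_LOG is not consulted here: EnvFilter::new() takes the directives as
// given instead of reading the environment.
// --- end note ---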
43 | .add_directive("tun_nat=info".parse()?); 44 | 45 | let _chrome_layer_guard = if let Some(log_path) = log_path { 46 | if let Some(path) = PathBuf::from(log_path).parent() { 47 | std::fs::create_dir_all(path)?; 48 | } 49 | let logger = Arc::new(Mutex::new(FileRotate::new( 50 | log_path, 51 | AppendTimestamp::default(file_rotate::suffix::FileLimit::MaxFiles(10)), 52 | file_rotate::ContentLimit::Bytes(10_000_000), 53 | file_rotate::compression::Compression::None, 54 | #[cfg(unix)] 55 | None, 56 | ))); 57 | 58 | if trace { 59 | let fmt_layer = tracing_subscriber::fmt::layer() 60 | .with_ansi(false) 61 | .with_writer(move || TracingWriter::new(logger.clone())) 62 | .and_then(env_filter); 63 | 64 | #[cfg(feature = "tracing-chrome")] 65 | { 66 | let (chrome_layer, guard) = ChromeLayerBuilder::new() 67 | .include_args(true) 68 | .trace_style(tracing_chrome::TraceStyle::Async) 69 | .build(); 70 | 71 | let registry = Registry::default().with(fmt_layer).with(chrome_layer); 72 | 73 | tracing::subscriber::set_global_default(registry) 74 | .expect("setting tracing default failed"); 75 | Some(guard) 76 | } 77 | 78 | #[cfg(not(feature = "tracing-chrome"))] 79 | { 80 | let registry = Registry::default().with(fmt_layer); 81 | 82 | tracing::subscriber::set_global_default(registry) 83 | .expect("setting tracing default failed"); 84 | None::<()> 85 | } 86 | } else { 87 | let fmt_layer = tracing_subscriber::fmt::layer() 88 | .with_ansi(false) 89 | .with_file(true) 90 | .with_line_number(true) 91 | .with_writer(move || TracingWriter::new(logger.clone())) 92 | .and_then(env_filter); 93 | let registry = Registry::default().with(fmt_layer); 94 | 95 | tracing::subscriber::set_global_default(registry) 96 | .expect("setting tracing default failed"); 97 | None 98 | } 99 | } else { 100 | None 101 | }; 102 | 103 | let guard = LoggerGuard { 104 | #[cfg(feature = "tracing-chrome")] 105 | _chrome_layer_guard: _chrome_layer_guard, 106 | }; 107 | 108 | // #[cfg(debug_assertions)] 109 | { 110 | // only for #[cfg] 111 | use parking_lot::deadlock; 112 | use std::thread; 113 | use std::time::Duration; 114 | 115 | // Create a background thread which checks for deadlocks every 10s 116 | thread::spawn(move || { 117 | loop { 118 | thread::sleep(Duration::from_secs(10)); 119 | let deadlocks = deadlock::check_deadlock(); 120 | if deadlocks.is_empty() { 121 | continue; 122 | } 123 | 124 | eprintln!("{} deadlocks detected", deadlocks.len()); 125 | for (i, threads) in deadlocks.iter().enumerate() { 126 | eprintln!("Deadlock #{i}"); 127 | for t in threads { 128 | eprintln!("Thread Id {:#?}", t.thread_id()); 129 | eprintln!("{:#?}", t.backtrace()); 130 | } 131 | } 132 | } 133 | }); 134 | } // only for #[cfg] 135 | Ok(guard) 136 | } 137 | -------------------------------------------------------------------------------- /seeker/src/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
retry_timeout { 2 | ($timeout: expr, $retries: expr, $fut: expr) => { 3 | async { 4 | let mut retries: usize = $retries; 5 | loop { 6 | let ret = async_std::io::timeout($timeout, $fut).await; 7 | match ret { 8 | v @ Ok(_) => break v, 9 | Err(e) if e.kind() == std::io::ErrorKind::TimedOut => { 10 | tracing::warn!("retry_timeout: {}", $retries - retries); 11 | if retries <= 0 { 12 | return Err(e); 13 | } 14 | } 15 | e => { 16 | break e; 17 | } 18 | } 19 | retries -= 1; 20 | } 21 | } 22 | }; 23 | } 24 | -------------------------------------------------------------------------------- /seeker/src/main.rs: -------------------------------------------------------------------------------- 1 | #![type_length_limit = "2374570"] 2 | #[macro_use] 3 | mod macros; 4 | mod config_encryptor; 5 | mod config_watcher; 6 | mod dns_client; 7 | mod group_servers_chooser; 8 | mod logger; 9 | mod probe_connectivity; 10 | mod proxy_client; 11 | mod proxy_connection; 12 | mod proxy_tcp_stream; 13 | mod proxy_udp_socket; 14 | mod relay_tcp_stream; 15 | mod relay_udp_socket; 16 | mod server_chooser; 17 | mod server_performance; 18 | mod traffic; 19 | 20 | use clap::Parser; 21 | use std::env::current_dir; 22 | use std::net::SocketAddrV4; 23 | use std::path::Path; 24 | use std::time::Duration; 25 | 26 | use crate::config_encryptor::encrypt_config_file; 27 | use crate::config_watcher::watch_config; 28 | use crate::logger::setup_logger; 29 | use crate::proxy_client::ProxyClient; 30 | use anyhow::{Context, bail}; 31 | use async_std::prelude::FutureExt; 32 | use async_std::task::block_on; 33 | use config::Config; 34 | use crypto::CipherType; 35 | use sysconfig::{DNSSetup, IpForward, IptablesSetup, get_current_dns, set_rlimit_no_file}; 36 | use tracing::Instrument; 37 | 38 | const REDIR_LISTEN_PORT: u16 = 1300; 39 | 40 | /// CLI program for a proxy 41 | #[derive(Parser, Debug)] 42 | #[clap( 43 | name = "Seeker", 44 | author = "gfreezy ", 45 | about = "Tun to Shadowsockets proxy. https://github.com/gfreezy/seeker" 46 | )] 47 | struct SeekerArgs { 48 | /// Set config file. The sample config at https://github.com/gfreezy/seeker/blob/master/sample_config.yml 49 | #[clap(short, long, value_name = "FILE")] 50 | config: Option, 51 | 52 | /// URL to config 53 | #[clap(long, value_name = "CONFIG_URL")] 54 | config_url: Option, 55 | 56 | /// Key for encryption/decryption 57 | #[clap(long, value_name = "KEY")] 58 | key: Option, 59 | 60 | /// User id to proxy 61 | #[clap(short = 'u', long, value_name = "UID")] 62 | user_id: Option, 63 | 64 | /// Encrypt config file and output to terminal 65 | #[clap(long)] 66 | encrypt: bool, 67 | 68 | /// Log file 69 | #[clap(short = 'l', long, value_name = "PATH")] 70 | log: Option, 71 | 72 | /// Write a trace log 73 | #[clap(short = 't', long)] 74 | trace: bool, 75 | 76 | /// Show connection stats 77 | #[clap(short = 's', long)] 78 | stats: bool, 79 | } 80 | 81 | fn main() -> anyhow::Result<()> { 82 | let args = SeekerArgs::parse(); 83 | 84 | let path = args.config.as_ref().map(String::as_ref); 85 | let key = args.key.as_ref().map(String::as_ref); 86 | let to_encrypt = args.encrypt; 87 | let to_trace = args.trace; 88 | 89 | if to_encrypt { 90 | println!( 91 | "Encrypted content is as below:\n\n\n{}\n\n", 92 | encrypt_config_file(path, key)? 
93 | ); 94 | return Ok(()); 95 | } 96 | let config_url = args.config_url; 97 | 98 | let config = load_config(path, config_url.as_deref(), get_current_dns(), key)?; 99 | 100 | // watch config file if path is provided 101 | let _watcher_handler = if let Some(p) = path { 102 | let config_clone = config.clone(); 103 | let mut config_path = Path::new(p).to_path_buf(); 104 | if !config_path.is_absolute() { 105 | config_path = current_dir() 106 | .expect("get current dir") 107 | .join(config_path) 108 | .canonicalize() 109 | .expect("canonicalize path"); 110 | } 111 | let watch_path = config_path.clone(); 112 | let path = config_path.to_str().expect("path to str").to_string(); 113 | tracing::info!("start watching config file: {:?}", watch_path); 114 | let debouncer = watch_config(watch_path, move |e| { 115 | tracing::info!("config file changed, reload rules: {:?}", e); 116 | match load_config(Some(&path), None, vec![], None) { 117 | Ok(new_config) => { 118 | config_clone 119 | .rules 120 | .replace_rules(new_config.rules.take_rules()); 121 | tracing::info!("Update rules success."); 122 | } 123 | Err(e) => { 124 | tracing::error!(?e, "Reload config error"); 125 | } 126 | } 127 | }); 128 | Some(debouncer) 129 | } else { 130 | None 131 | }; 132 | 133 | let dns = config 134 | .dns_listens 135 | .iter() 136 | .map(|addr| addr.parse::().unwrap().ip().to_string()) 137 | .collect(); 138 | // Linux system needs to be mut. 139 | #[allow(unused_mut)] 140 | let mut dns_setup = DNSSetup::new(dns); 141 | 142 | let uid = args.user_id; 143 | let log_path = args.log; 144 | let show_stats = args.stats; 145 | 146 | eprint!("Starting."); 147 | let _guard = setup_logger(log_path.as_deref(), to_trace)?; 148 | eprint!("."); 149 | set_rlimit_no_file(10240)?; 150 | eprint!("."); 151 | let _ip_forward = if config.gateway_mode { 152 | // In gateway mode, dns server need be accessible from the network. 153 | Some(IpForward::new()) 154 | } else { 155 | None 156 | }; 157 | eprint!("."); 158 | // oneshot channel 159 | let (tx, rx) = async_std::channel::bounded(1); 160 | ctrlc::set_handler(move || block_on(tx.send(())).expect("send signal")) 161 | .expect("Error setting Ctrl-C handler"); 162 | 163 | block_on(async { 164 | let cidr = config.tun_cidr.to_string(); 165 | let redir_mode = config.redir_mode; 166 | let client = ProxyClient::new(config, uid, show_stats) 167 | .instrument(tracing::trace_span!("ProxyClient.new")) 168 | .await; 169 | eprint!("."); 170 | 171 | dns_setup.start(); 172 | eprintln!("Started!"); 173 | 174 | let mut _iptables_setup: Option = None; 175 | if redir_mode { 176 | let setup = IptablesSetup::new(REDIR_LISTEN_PORT, cidr); 177 | setup.start(); 178 | _iptables_setup = Some(setup); 179 | } 180 | 181 | client 182 | .run() 183 | .instrument(tracing::trace_span!("ProxyClient.run")) 184 | .race(async { 185 | rx.recv() 186 | .instrument(tracing::trace_span!("Signal receiver")) 187 | .await 188 | .expect("Could not receive signal on channel."); 189 | }) 190 | .await; 191 | }); 192 | 193 | println!("Stop server. Bye bye..."); 194 | Ok(()) 195 | } 196 | 197 | fn load_config( 198 | path: Option<&str>, 199 | url: Option<&str>, 200 | original_dns: Vec, 201 | decrypt_key: Option<&str>, 202 | ) -> anyhow::Result { 203 | let mut c = match (path, url, decrypt_key) { 204 | (Some(p), ..) 
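// --- Editor's note (illustrative, not part of main.rs) ---
// Config source precedence in this match: a local --config path always wins;
// the --config-url + --key pair is only used when no path was given (the
// remote file is fetched with a 5s timeout and then decrypted); any other
// combination bails out with a parameter error.
// --- end note ---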
=> Config::from_config_file(p).context("Load config from path error")?, 205 | (_, Some(url), Some(key)) => { 206 | let ret = ureq::get(url).timeout(Duration::from_secs(5)).call(); 207 | let resp = match ret { 208 | Err(e) => { 209 | return Err(anyhow::anyhow!( 210 | "Load config from remote host error: {}", 211 | e.to_string() 212 | )); 213 | } 214 | Ok(resp) => resp, 215 | }; 216 | let config = 217 | config_encryptor::decrypt_config(resp.into_reader(), CipherType::ChaCha20Ietf, key) 218 | .context("Decrypt remote config error")?; 219 | Config::from_reader(config.as_slice()).context("Load Config error")? 220 | } 221 | _ => bail!("Parameters error"), 222 | }; 223 | 224 | // If dns_servers is empty, use original dns servers. 225 | if c.dns_servers.is_empty() { 226 | for dns in original_dns { 227 | c.dns_servers.push(config::DnsServerAddr::UdpSocketAddr( 228 | format!("{dns}:53").parse()?, 229 | )); 230 | } 231 | } 232 | Ok(c) 233 | } 234 | -------------------------------------------------------------------------------- /seeker/src/proxy_connection.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | use std::time::{Duration, Instant}; 3 | 4 | use crate::traffic::Traffic; 5 | use config::{Address, ServerConfig, rule::Action}; 6 | use store::Store; 7 | 8 | // id generator for connection 9 | pub static CONNECTION_ID: AtomicUsize = AtomicUsize::new(0); 10 | 11 | pub fn next_connection_id() -> u64 { 12 | CONNECTION_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst) as u64 13 | } 14 | 15 | #[allow(dead_code)] 16 | pub trait ProxyConnection { 17 | fn id(&self) -> u64; 18 | fn network(&self) -> &'static str; 19 | fn conn_type(&self) -> &'static str; 20 | fn traffic(&self) -> Traffic; 21 | fn recv_bytes(&self) -> usize; 22 | fn sent_bytes(&self) -> usize; 23 | fn action(&self) -> Action; 24 | fn config(&self) -> Option<&ServerConfig>; 25 | fn has_config(&self, config: Option<&ServerConfig>) -> bool; 26 | fn shutdown(&self); 27 | fn is_alive(&self) -> bool; 28 | fn remote_addr(&self) -> Option<&Address> { 29 | None 30 | } 31 | fn duration(&self) -> Duration { 32 | self.connect_time().elapsed() 33 | } 34 | fn connect_time(&self) -> Instant; 35 | } 36 | 37 | pub trait ProxyConnectionEventListener { 38 | fn on_connect(&self, conn: &dyn ProxyConnection); 39 | fn on_shutdown(&self, conn: &dyn ProxyConnection); 40 | fn on_recv_bytes(&self, conn: &dyn ProxyConnection, bytes: usize); 41 | fn on_send_bytes(&self, conn: &dyn ProxyConnection, bytes: usize); 42 | } 43 | 44 | #[derive(Clone)] 45 | pub struct StoreListener; 46 | 47 | impl ProxyConnectionEventListener for StoreListener { 48 | fn on_connect(&self, conn: &dyn ProxyConnection) { 49 | let store = Store::global(); 50 | 51 | let host = conn 52 | .remote_addr() 53 | .map(|addr| addr.to_string()) 54 | .unwrap_or_default(); 55 | let proxy_server = conn 56 | .config() 57 | .map(|config| config.addr().to_string()) 58 | .unwrap_or_default(); 59 | let ret = store.new_connection( 60 | conn.id(), 61 | &host, 62 | conn.network(), 63 | conn.conn_type(), 64 | &proxy_server, 65 | ); 66 | if let Err(e) = ret { 67 | tracing::error!("Failed to insert live connection: {}", e); 68 | } 69 | } 70 | 71 | fn on_shutdown(&self, conn: &dyn ProxyConnection) { 72 | let store = Store::global(); 73 | let ret = store.shutdown_connection(conn.id()); 74 | if let Err(e) = ret { 75 | tracing::error!("Failed to remove live connection: {}", e); 76 | } 77 | } 78 | 79 | fn on_recv_bytes(&self, conn: &dyn 
ProxyConnection, bytes: usize) { 80 | let store = Store::global(); 81 | let ret = store.incr_connection_recv_bytes(conn.id(), bytes as u64); 82 | if let Err(e) = ret { 83 | tracing::error!("Failed to increment recv bytes: {}", e); 84 | } 85 | } 86 | 87 | fn on_send_bytes(&self, conn: &dyn ProxyConnection, bytes: usize) { 88 | let store = Store::global(); 89 | let ret = store.incr_connection_sent_bytes(conn.id(), bytes as u64); 90 | if let Err(e) = ret { 91 | tracing::error!("Failed to increment sent bytes: {}", e); 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /seeker/src/proxy_udp_socket.rs: -------------------------------------------------------------------------------- 1 | use crate::dns_client::DnsClient; 2 | use crate::proxy_connection::{ 3 | ProxyConnection, ProxyConnectionEventListener, StoreListener, next_connection_id, 4 | }; 5 | use crate::traffic::Traffic; 6 | use async_std::net::{SocketAddr, UdpSocket}; 7 | use config::rule::Action; 8 | use config::{ServerConfig, ServerProtocol}; 9 | use socks5_client::Socks5UdpSocket; 10 | use ssclient::SSUdpSocket; 11 | use std::io; 12 | use std::io::{Error, ErrorKind}; 13 | use std::sync::Arc; 14 | use std::sync::atomic::{AtomicBool, Ordering}; 15 | use std::time::Instant; 16 | 17 | #[derive(Clone)] 18 | enum ProxyUdpSocketInner { 19 | Direct(Arc), 20 | Socks5(Arc), 21 | Shadowsocks(Arc), 22 | } 23 | 24 | #[derive(Clone)] 25 | pub struct ProxyUdpSocket { 26 | id: u64, 27 | inner: ProxyUdpSocketInner, 28 | alive: Arc, 29 | config: Option, 30 | traffic: Traffic, 31 | connect_time: Instant, 32 | listener: Option>, 33 | } 34 | 35 | impl ProxyUdpSocket { 36 | pub async fn new(config: Option<&ServerConfig>, dns_client: DnsClient) -> io::Result { 37 | let socket = if let Some(config) = config { 38 | match config.protocol() { 39 | ServerProtocol::Socks5 => { 40 | let server = dns_client.lookup_address(config.addr()).await?; 41 | ProxyUdpSocketInner::Socks5(Arc::new(Socks5UdpSocket::new(server).await?)) 42 | } 43 | ServerProtocol::Shadowsocks => { 44 | let server = dns_client.lookup_address(config.addr()).await?; 45 | let (method, key) = match (config.method(), config.key()) { 46 | (Some(m), Some(k)) => (m, k), 47 | _ => { 48 | return Err(Error::new( 49 | ErrorKind::InvalidData, 50 | "method and password must be set for ss protocol.", 51 | )); 52 | } 53 | }; 54 | 55 | let udp = SSUdpSocket::new(server, method, key).await?; 56 | ProxyUdpSocketInner::Shadowsocks(Arc::new(udp)) 57 | } 58 | protocol => { 59 | return Err(Error::new( 60 | ErrorKind::ConnectionRefused, 61 | format!("udp not supported for {protocol:?}."), 62 | )); 63 | } 64 | } 65 | } else { 66 | ProxyUdpSocketInner::Direct(Arc::new(UdpSocket::bind("0.0.0.0:0").await?)) 67 | }; 68 | let listener: Option> = 69 | Some(Arc::new(StoreListener)); 70 | let socket = ProxyUdpSocket { 71 | inner: socket, 72 | alive: Arc::new(AtomicBool::new(true)), 73 | config: config.cloned(), 74 | traffic: Default::default(), 75 | connect_time: Instant::now(), 76 | id: next_connection_id(), 77 | listener: listener.clone(), 78 | }; 79 | if let Some(listener) = listener { 80 | listener.on_connect(&socket); 81 | } 82 | Ok(socket) 83 | } 84 | 85 | pub async fn send_to(&self, buf: &[u8], addr: SocketAddr) -> io::Result { 86 | if !self.is_alive() { 87 | return Err(Error::new( 88 | ErrorKind::BrokenPipe, 89 | "ProxyUdpSocket not alive", 90 | )); 91 | } 92 | let ret = match &self.inner { 93 | ProxyUdpSocketInner::Direct(socket) => socket.send_to(buf, addr).await, 
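// --- Illustrative sketch (editor's example, not part of proxy_udp_socket.rs) ---
// Round trip through a direct (non-proxied) ProxyUdpSocket; passing `None`
// for the server config binds a plain UdpSocket.  The target address is a
// placeholder.
async fn direct_udp_example(dns_client: DnsClient) -> io::Result<()> {
    let socket = ProxyUdpSocket::new(None, dns_client).await?;
    let target: SocketAddr = "198.51.100.1:53".parse().unwrap();
    socket.send_to(b"ping", target).await?;

    let mut buf = [0u8; 1500];
    let (n, from) = socket.recv_from(&mut buf).await?;
    tracing::debug!(bytes = n, ?from, "got reply");

    // `shutdown` marks the socket dead and notifies the StoreListener; any
    // further send_to/recv_from call then fails with BrokenPipe.
    socket.shutdown();
    Ok(())
}
// --- end sketch ---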
94 | ProxyUdpSocketInner::Socks5(socket) => socket.send_to(buf, addr).await, 95 | ProxyUdpSocketInner::Shadowsocks(socket) => socket.send_to(buf, addr).await, 96 | }; 97 | match ret { 98 | Err(_) => { 99 | self.shutdown(); 100 | } 101 | Ok(size) => { 102 | self.traffic.send(size); 103 | if let Some(listener) = &self.listener { 104 | listener.on_send_bytes(self, size); 105 | } 106 | } 107 | } 108 | ret 109 | } 110 | 111 | pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { 112 | if !self.is_alive() { 113 | return Err(Error::new( 114 | ErrorKind::BrokenPipe, 115 | "ProxyUdpSocket not alive", 116 | )); 117 | } 118 | let ret = match &self.inner { 119 | ProxyUdpSocketInner::Direct(socket) => socket.recv_from(buf).await, 120 | ProxyUdpSocketInner::Socks5(socket) => socket.recv_from(buf).await, 121 | ProxyUdpSocketInner::Shadowsocks(socket) => socket.recv_from(buf).await, 122 | }; 123 | match ret { 124 | Err(_) => { 125 | self.shutdown(); 126 | } 127 | Ok((size, _)) => { 128 | self.traffic.recv(size); 129 | if let Some(listener) = &self.listener { 130 | listener.on_recv_bytes(self, size); 131 | } 132 | } 133 | } 134 | ret 135 | } 136 | } 137 | 138 | impl ProxyConnection for ProxyUdpSocket { 139 | fn network(&self) -> &'static str { 140 | "udp" 141 | } 142 | fn traffic(&self) -> Traffic { 143 | self.traffic.clone() 144 | } 145 | 146 | fn action(&self) -> config::rule::Action { 147 | match self.inner { 148 | ProxyUdpSocketInner::Direct(_) => Action::Direct, 149 | ProxyUdpSocketInner::Socks5(_) | ProxyUdpSocketInner::Shadowsocks(_) => { 150 | Action::Proxy("".to_string()) 151 | } 152 | } 153 | } 154 | 155 | fn config(&self) -> Option<&ServerConfig> { 156 | self.config.as_ref() 157 | } 158 | 159 | fn has_config(&self, config: Option<&ServerConfig>) -> bool { 160 | self.config.as_ref() == config 161 | } 162 | 163 | fn shutdown(&self) { 164 | self.alive.store(false, Ordering::SeqCst); 165 | if let Some(listener) = &self.listener { 166 | listener.on_shutdown(self); 167 | } 168 | } 169 | 170 | fn is_alive(&self) -> bool { 171 | self.alive.load(Ordering::SeqCst) 172 | } 173 | 174 | fn id(&self) -> u64 { 175 | self.id 176 | } 177 | 178 | fn connect_time(&self) -> std::time::Instant { 179 | self.connect_time 180 | } 181 | 182 | fn conn_type(&self) -> &'static str { 183 | match &self.inner { 184 | ProxyUdpSocketInner::Direct(_) => "direct", 185 | ProxyUdpSocketInner::Socks5(_) => "socks5", 186 | ProxyUdpSocketInner::Shadowsocks(_) => "shadowsocks", 187 | } 188 | } 189 | 190 | fn recv_bytes(&self) -> usize { 191 | self.traffic.received_bytes() 192 | } 193 | 194 | fn sent_bytes(&self) -> usize { 195 | self.traffic.sent_bytes() 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /seeker/src/relay_tcp_stream.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use async_std::io::{Read, Write, timeout}; 3 | use async_std::net::TcpStream; 4 | use async_std::prelude::*; 5 | use config::{Address, Config}; 6 | 7 | use std::net::SocketAddr; 8 | 9 | use std::time::Duration; 10 | use tracing::{error, instrument, trace}; 11 | 12 | use crate::probe_connectivity::ProbeConnectivity; 13 | use crate::proxy_client::get_action_for_addr; 14 | use crate::proxy_connection::ProxyConnection; 15 | use crate::proxy_tcp_stream::ProxyTcpStream; 16 | use crate::server_chooser::ServerChooser; 17 | 18 | #[allow(clippy::too_many_arguments)] 19 | #[instrument(skip_all)] 20 | pub(crate) async fn 
relay_tcp_stream( 21 | conn: TcpStream, 22 | real_src: SocketAddr, 23 | real_dest: SocketAddr, 24 | host: Address, 25 | config: Config, 26 | server_chooser: ServerChooser, 27 | connectivity: ProbeConnectivity, 28 | user_id: Option, 29 | on_update_activity: impl Fn() -> bool, 30 | ) -> Result<()> { 31 | let remote_conn = match choose_proxy_tcp_stream( 32 | real_src, 33 | real_dest, 34 | &host, 35 | &config, 36 | &server_chooser, 37 | &connectivity, 38 | user_id, 39 | ) 40 | .await 41 | { 42 | Ok(remote_conn) => remote_conn, 43 | Err(e) => { 44 | error!(?host, ?e, "connect remote error"); 45 | return Err(e); 46 | } 47 | }; 48 | 49 | let ret = tunnel_tcp_stream( 50 | &host, 51 | conn, 52 | remote_conn.clone(), 53 | config.read_timeout, 54 | config.write_timeout, 55 | on_update_activity, 56 | ) 57 | .await; 58 | if let Err(e) = &ret { 59 | tracing::error!(?e, ?host, "tunnel tcp stream"); 60 | } else { 61 | tracing::info!("tunnel tcp stream: recycle port, host: {host}"); 62 | } 63 | remote_conn.shutdown(); 64 | Ok(()) 65 | } 66 | 67 | #[instrument(skip( 68 | original_addr, 69 | sock_addr, 70 | config, 71 | server_chooser, 72 | connectivity, 73 | user_id 74 | ))] 75 | async fn choose_proxy_tcp_stream( 76 | original_addr: SocketAddr, 77 | sock_addr: SocketAddr, 78 | remote_addr: &Address, 79 | config: &Config, 80 | server_chooser: &ServerChooser, 81 | connectivity: &ProbeConnectivity, 82 | user_id: Option, 83 | ) -> Result { 84 | let action = get_action_for_addr( 85 | original_addr, 86 | sock_addr, 87 | remote_addr, 88 | config, 89 | connectivity, 90 | user_id, 91 | ) 92 | .await?; 93 | trace!(?action, "selected action"); 94 | 95 | Ok(retry_timeout!( 96 | config.connect_timeout, 97 | config.max_connect_errors, 98 | server_chooser.candidate_tcp_stream(remote_addr.clone(), action.clone()) 99 | ) 100 | .await?) 
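    // Note: `retry_timeout!` is defined in seeker/src/macros.rs (not shown here); judging
    // from its arguments it appears to retry the wrapped candidate-selection future up to
    // `config.max_connect_errors` times, applying `config.connect_timeout` to each attempt.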
101 | } 102 | 103 | async fn tunnel_tcp_stream( 104 | _host: &Address, 105 | mut conn1: T1, 106 | mut conn2: T2, 107 | read_timeout: Duration, 108 | write_timeout: Duration, 109 | on_update_activity: impl Fn() -> bool, 110 | ) -> std::io::Result<()> { 111 | let mut conn1_clone = conn1.clone(); 112 | let mut conn2_clone = conn2.clone(); 113 | let f1 = async { 114 | let mut buf = vec![0; 1600]; 115 | loop { 116 | if !on_update_activity() { 117 | break Err(std::io::ErrorKind::ConnectionAborted.into()); 118 | } 119 | let size = timeout(read_timeout, conn1.read(&mut buf)).await?; 120 | if size == 0 { 121 | break Ok(()); 122 | } 123 | timeout(write_timeout, conn2.write_all(&buf[..size])).await?; 124 | } 125 | }; 126 | let f2 = async { 127 | let mut buf = vec![0; 1600]; 128 | loop { 129 | if !on_update_activity() { 130 | break Err(std::io::ErrorKind::ConnectionAborted.into()); 131 | } 132 | let size = timeout(read_timeout, conn2_clone.read(&mut buf)).await?; 133 | if size == 0 { 134 | break Ok(()); 135 | } 136 | timeout(write_timeout, conn1_clone.write_all(&buf[..size])).await?; 137 | } 138 | }; 139 | f1.race(f2).await 140 | } 141 | -------------------------------------------------------------------------------- /seeker/src/relay_udp_socket.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::sync::Arc; 3 | 4 | use async_std::io::timeout; 5 | use async_std::net::UdpSocket; 6 | use async_std::task::spawn; 7 | use config::{Address, Config}; 8 | use dnsserver::resolver::RuleBasedDnsResolver; 9 | use tun_nat::SessionManager; 10 | 11 | use crate::dns_client::DnsClient; 12 | use crate::probe_connectivity::ProbeConnectivity; 13 | use crate::proxy_client::{UdpManager, get_action_for_addr, get_real_src_real_dest_and_host}; 14 | use crate::proxy_connection::ProxyConnection; 15 | use crate::proxy_udp_socket::ProxyUdpSocket; 16 | use crate::server_chooser::ServerChooser; 17 | 18 | #[allow(clippy::too_many_arguments)] 19 | pub(crate) async fn relay_udp_socket( 20 | tun_socket: Arc, 21 | tun_addr: SocketAddr, 22 | session_manager: SessionManager, 23 | resolver: RuleBasedDnsResolver, 24 | dns_client: DnsClient, 25 | config: Config, 26 | server_chooser: ServerChooser, 27 | connectivity: ProbeConnectivity, 28 | user_id: Option, 29 | udp_manager: UdpManager, 30 | ) -> std::io::Result<(ProxyUdpSocket, SocketAddr, Address)> { 31 | let session_port = tun_addr.port(); 32 | let (real_src, real_dest, host) = get_real_src_real_dest_and_host( 33 | session_port, 34 | &session_manager, 35 | &resolver, 36 | &dns_client, 37 | &config, 38 | ) 39 | .await?; 40 | tracing::debug!(?real_src, ?real_dest, ?host, "new udp connection"); 41 | let proxy_socket = choose_proxy_udp_socket( 42 | real_src, 43 | real_dest, 44 | &host, 45 | &config, 46 | &server_chooser, 47 | &connectivity, 48 | user_id, 49 | ) 50 | .await?; 51 | 52 | tracing::debug!("new udp connection successfully, {}", host); 53 | 54 | let proxy_client_clone = proxy_socket.clone(); 55 | let host_clone = host.clone(); 56 | let udp_manager_clone = udp_manager.clone(); 57 | spawn(async move { 58 | let _: std::io::Result<()> = async { 59 | let mut buf = vec![0; 2000]; 60 | loop { 61 | if !session_manager.update_activity_for_port(session_port) { 62 | return Err(std::io::Error::new( 63 | std::io::ErrorKind::ConnectionAborted, 64 | format!("port recycled, {host_clone}"), 65 | )); 66 | } 67 | let (recv_size, _peer) = 68 | timeout(config.read_timeout, proxy_client_clone.recv_from(&mut buf)).await?; 69 | 
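                    // Forward every datagram received from the proxy back to the client via
                    // the TUN socket. If either direction stalls past the configured timeout,
                    // or the NAT session's activity flag is no longer refreshed, the loop
                    // errors out and the cleanup below recycles the port, drops the UDP
                    // manager entry and shuts the proxy socket down.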
assert!(recv_size < 2000); 70 | let send_size = timeout( 71 | config.write_timeout, 72 | tun_socket.send_to(&buf[..recv_size], tun_addr), 73 | ) 74 | .await?; 75 | assert_eq!(send_size, recv_size); 76 | } 77 | } 78 | .await; 79 | session_manager.recycle_port(session_port); 80 | udp_manager_clone.write().remove(&session_port); 81 | proxy_client_clone.shutdown(); 82 | }); 83 | 84 | udp_manager.write().insert( 85 | session_port, 86 | (proxy_socket.clone(), real_dest, host.clone()), 87 | ); 88 | 89 | Ok((proxy_socket, real_dest, host)) 90 | } 91 | 92 | async fn choose_proxy_udp_socket( 93 | real_src: SocketAddr, 94 | real_dest: SocketAddr, 95 | remote_addr: &Address, 96 | config: &Config, 97 | server_chooser: &ServerChooser, 98 | connectivity: &ProbeConnectivity, 99 | user_id: Option, 100 | ) -> std::io::Result { 101 | let action = get_action_for_addr( 102 | real_src, 103 | real_dest, 104 | remote_addr, 105 | config, 106 | connectivity, 107 | user_id, 108 | ) 109 | .await?; 110 | tracing::debug!(?action, ?remote_addr, "udp action"); 111 | 112 | retry_timeout!( 113 | config.connect_timeout, 114 | config.max_connect_errors, 115 | server_chooser.candidate_udp_socket(action.clone()) 116 | ) 117 | .await 118 | } 119 | -------------------------------------------------------------------------------- /seeker/src/server_chooser.rs: -------------------------------------------------------------------------------- 1 | use crate::dns_client::DnsClient; 2 | use crate::group_servers_chooser::GroupServersChooser; 3 | use crate::proxy_tcp_stream::ProxyTcpStream; 4 | use crate::proxy_udp_socket::ProxyUdpSocket; 5 | use anyhow::Result; 6 | use async_std::task::spawn; 7 | use config::rule::Action; 8 | use config::{Address, Config}; 9 | use futures_util::future::join_all; 10 | use std::collections::HashMap; 11 | 12 | #[derive(Clone)] 13 | pub struct ServerChooser { 14 | dns_client: DnsClient, 15 | group_servers_chooser: HashMap, 16 | } 17 | 18 | impl ServerChooser { 19 | pub async fn new(config: Config, dns_client: DnsClient, show_stats: bool) -> Self { 20 | let mut group_servers_chooser = HashMap::new(); 21 | for group in config.proxy_groups.iter() { 22 | let ping_urls = if group.ping_urls.is_empty() { 23 | config.ping_urls.clone() 24 | } else { 25 | group.ping_urls.clone() 26 | }; 27 | let ping_timeout = if let Some(ping_timeout) = group.ping_timeout { 28 | ping_timeout 29 | } else { 30 | config.ping_timeout 31 | }; 32 | let servers = config.get_servers_by_name(&group.name); 33 | group_servers_chooser.insert( 34 | group.name.clone(), 35 | GroupServersChooser::new( 36 | group.name.clone(), 37 | servers, 38 | dns_client.clone(), 39 | ping_urls, 40 | ping_timeout, 41 | show_stats, 42 | ) 43 | .await, 44 | ); 45 | } 46 | Self { 47 | dns_client, 48 | group_servers_chooser, 49 | } 50 | } 51 | 52 | #[tracing::instrument(skip(self))] 53 | pub async fn candidate_tcp_stream( 54 | &self, 55 | remote_addr: Address, 56 | action: Action, 57 | ) -> std::io::Result { 58 | let stream = match action { 59 | Action::Proxy(proxy_group_name) => { 60 | self.proxy_connect(&remote_addr, &proxy_group_name).await? 
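                // The group name carried in `Action::Proxy` selects the per-group
                // `GroupServersChooser` built in `new()` above (each group gets its own
                // servers, ping URLs and ping timeout); `Action::Direct` below bypasses
                // all proxies and connects straight to the destination.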
61 | } 62 | Action::Direct => self.direct_connect(&remote_addr).await?, 63 | _ => unreachable!(), 64 | }; 65 | 66 | Ok(stream) 67 | } 68 | 69 | pub async fn proxy_connect( 70 | &self, 71 | remote_addr: &Address, 72 | proxy_group_name: &str, 73 | ) -> std::io::Result { 74 | let Some(chooser) = self.group_servers_chooser.get(proxy_group_name) else { 75 | return Err(std::io::Error::new( 76 | std::io::ErrorKind::NotFound, 77 | format!("proxy group {} not found", proxy_group_name), 78 | )); 79 | }; 80 | chooser.proxy_connect(remote_addr).await 81 | } 82 | 83 | async fn direct_connect(&self, remote_addr: &Address) -> std::io::Result { 84 | let ret = ProxyTcpStream::connect(remote_addr.clone(), None, self.dns_client.clone()).await; 85 | if ret.is_err() { 86 | tracing::error!(?remote_addr, action = ?Action::Direct, "Failed to connect to server"); 87 | } 88 | ret 89 | } 90 | 91 | pub async fn candidate_udp_socket(&self, action: Action) -> std::io::Result { 92 | let socket = match &action { 93 | Action::Direct => ProxyUdpSocket::new(None, self.dns_client.clone()).await?, 94 | Action::Proxy(proxy_group_name) => { 95 | let Some(chooser) = self.group_servers_chooser.get(proxy_group_name) else { 96 | return Err(std::io::Error::new( 97 | std::io::ErrorKind::NotFound, 98 | format!("proxy group {} not found", proxy_group_name), 99 | )); 100 | }; 101 | chooser.candidate_udp_socket(action).await? 102 | } 103 | _ => unreachable!(), 104 | }; 105 | Ok(socket) 106 | } 107 | 108 | pub async fn run_background_tasks(&self) -> Result<()> { 109 | let mut handles = Vec::new(); 110 | for chooser in self.group_servers_chooser.values() { 111 | let chooser = chooser.clone(); 112 | handles.push(spawn(async move { chooser.run_background_tasks().await })); 113 | } 114 | join_all(handles).await.into_iter().collect() 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /seeker/src/server_performance.rs: -------------------------------------------------------------------------------- 1 | use config::ServerConfig; 2 | use parking_lot::Mutex; 3 | use std::collections::HashMap; 4 | use std::sync::Arc; 5 | use std::time::{Duration, Instant}; 6 | 7 | pub const FAILURE_LATENCY: Duration = Duration::from_secs(60); // 60秒表示服务器不可用 8 | pub const DEFAULT_SCORE: f64 = 100000.0; // 未测试过的服务器返回无穷大 9 | pub const DEFAULT_LATENCY: f64 = 100000.0; // 未测试过的服务器返回无穷大 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct ServerPerformanceStats { 13 | pub score: f64, 14 | pub latency: f64, 15 | pub success_rate: f64, 16 | pub success: u32, 17 | pub failure: u32, 18 | } 19 | 20 | #[derive(Clone)] 21 | pub struct ServerPerformance { 22 | latency_history: Vec<(Instant, Duration)>, 23 | success_count: u32, 24 | failure_count: u32, 25 | last_update: Instant, 26 | max_history_size: usize, 27 | half_life: Duration, 28 | } 29 | 30 | impl ServerPerformance { 31 | pub fn new(max_history_size: usize, half_life: Duration) -> Self { 32 | Self { 33 | latency_history: Vec::new(), 34 | success_count: 0, 35 | failure_count: 0, 36 | last_update: Instant::now(), 37 | max_history_size, 38 | half_life, 39 | } 40 | } 41 | 42 | pub fn add_result(&mut self, latency: Option, success: bool) { 43 | let now = Instant::now(); 44 | 45 | if success { 46 | self.latency_history 47 | .push((now, latency.unwrap_or(FAILURE_LATENCY))); 48 | self.success_count += 1; 49 | } else { 50 | self.latency_history.push((now, FAILURE_LATENCY)); 51 | self.failure_count += 1; 52 | } 53 | self.last_update = now; 54 | 55 | // Keep only the most recent 
records 56 | if self.latency_history.len() > self.max_history_size { 57 | self.latency_history.remove(0); 58 | } 59 | } 60 | 61 | pub fn calculate_score(&self, now: Instant) -> f64 { 62 | let mut total_weighted_latency = 0.0; 63 | let mut total_weight = 0.0; 64 | 65 | for (timestamp, latency) in &self.latency_history { 66 | let age = now.duration_since(*timestamp); 67 | let weight = 2.0_f64.powf(-age.as_secs_f64() / self.half_life.as_secs_f64()); 68 | total_weighted_latency += latency.as_millis() as f64 * weight; 69 | total_weight += weight; 70 | } 71 | 72 | if total_weight == 0.0 { 73 | return DEFAULT_SCORE; 74 | } 75 | 76 | // 如果没有成功记录,返回默认高分 77 | if self.success_count == 0 { 78 | return DEFAULT_SCORE; 79 | } 80 | 81 | total_weighted_latency / total_weight 82 | } 83 | 84 | pub fn get_stats(&self) -> ServerPerformanceStats { 85 | let now = Instant::now(); 86 | let mut total_latency = 0.0; 87 | let mut count = 0; 88 | 89 | for (timestamp, latency) in &self.latency_history { 90 | let age = now.duration_since(*timestamp); 91 | let weight = 2.0_f64.powf(-age.as_secs_f64() / self.half_life.as_secs_f64()); 92 | total_latency += latency.as_millis() as f64 * weight; 93 | count += 1; 94 | } 95 | 96 | let avg_latency = if count > 0 { 97 | total_latency / count as f64 98 | } else { 99 | DEFAULT_LATENCY 100 | }; 101 | 102 | let success_rate = if self.success_count + self.failure_count > 0 { 103 | self.success_count as f64 / (self.success_count + self.failure_count) as f64 104 | } else { 105 | 0.0 106 | }; 107 | 108 | let score = self.calculate_score(now); 109 | 110 | ServerPerformanceStats { 111 | score, 112 | latency: avg_latency, 113 | success_rate, 114 | success: self.success_count, 115 | failure: self.failure_count, 116 | } 117 | } 118 | } 119 | 120 | #[derive(Clone)] 121 | pub struct ServerPerformanceTracker { 122 | performance_history: Arc>>, 123 | max_history_size: usize, 124 | half_life: Duration, 125 | } 126 | 127 | impl ServerPerformanceTracker { 128 | pub fn new(max_history_size: usize, half_life: Duration) -> Self { 129 | Self { 130 | performance_history: Arc::new(Mutex::new(HashMap::new())), 131 | max_history_size, 132 | half_life, 133 | } 134 | } 135 | 136 | pub fn add_result(&self, server: &ServerConfig, latency: Option, success: bool) { 137 | let mut history = self.performance_history.lock(); 138 | let performance = history 139 | .entry(server.addr().to_string()) 140 | .or_insert_with(|| ServerPerformance::new(self.max_history_size, self.half_life)); 141 | performance.add_result(latency, success); 142 | } 143 | 144 | pub fn get_server_score(&self, server: &ServerConfig, now: Instant) -> f64 { 145 | let history = self.performance_history.lock(); 146 | history 147 | .get(&server.addr().to_string()) 148 | .map_or(DEFAULT_SCORE, |p| p.calculate_score(now)) 149 | } 150 | 151 | #[allow(dead_code)] 152 | pub fn get_server_stats(&self, server: &ServerConfig) -> Option { 153 | let history = self.performance_history.lock(); 154 | history 155 | .get(&server.addr().to_string()) 156 | .map(|p| p.get_stats()) 157 | } 158 | 159 | pub fn get_all_server_stats(&self) -> Vec<(String, ServerPerformanceStats)> { 160 | let history = self.performance_history.lock(); 161 | history 162 | .iter() 163 | .map(|(addr, perf)| { 164 | let stats = perf.get_stats(); 165 | (addr.clone(), stats) 166 | }) 167 | .collect() 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /seeker/src/traffic.rs: 
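// The score computed by `calculate_score` in server_performance.rs above is an
// exponentially weighted latency: each sample's weight halves every `half_life`.
// A minimal standalone sketch of the same weighting (helper names and values here
// are illustrative only and are not part of the crate):

use std::time::Duration;

// weight = 2^(-age / half_life); a sample as old as one half-life counts half as much.
fn decay_weight(age: Duration, half_life: Duration) -> f64 {
    2.0_f64.powf(-age.as_secs_f64() / half_life.as_secs_f64())
}

fn weighted_latency_ms(samples: &[(Duration, Duration)], half_life: Duration) -> f64 {
    let (mut num, mut den) = (0.0, 0.0);
    for (age, latency) in samples {
        let w = decay_weight(*age, half_life);
        num += latency.as_millis() as f64 * w;
        den += w;
    }
    if den == 0.0 { f64::INFINITY } else { num / den }
}

fn main() {
    let half_life = Duration::from_secs(60);
    // A fresh 100 ms sample and a one-minute-old 300 ms sample:
    // weights are 1.0 and 0.5, so the score is (100 + 150) / 1.5 = 166.7 ms.
    let samples = [
        (Duration::from_secs(0), Duration::from_millis(100)),
        (Duration::from_secs(60), Duration::from_millis(300)),
    ];
    println!("{:.1}", weighted_latency_ms(&samples, half_life));
}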
-------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::sync::atomic::{AtomicUsize, Ordering}; 3 | use std::time::{Duration, Instant}; 4 | 5 | #[derive(Clone)] 6 | pub struct Traffic { 7 | connect_time: Instant, 8 | recv: Arc, 9 | send: Arc, 10 | } 11 | 12 | impl Default for Traffic { 13 | fn default() -> Self { 14 | Self { 15 | connect_time: Instant::now(), 16 | recv: Arc::new(AtomicUsize::new(0)), 17 | send: Arc::new(AtomicUsize::new(0)), 18 | } 19 | } 20 | } 21 | 22 | impl Traffic { 23 | pub fn recv(&self, size: usize) { 24 | self.recv.fetch_add(size, Ordering::Relaxed); 25 | } 26 | 27 | pub fn send(&self, size: usize) { 28 | self.send.fetch_add(size, Ordering::Relaxed); 29 | } 30 | 31 | pub fn received_bytes(&self) -> usize { 32 | self.recv.load(Ordering::Relaxed) 33 | } 34 | 35 | pub fn sent_bytes(&self) -> usize { 36 | self.send.load(Ordering::Relaxed) 37 | } 38 | 39 | pub fn duration(&self) -> Duration { 40 | Instant::now().duration_since(self.connect_time) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /socks5_client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "socks5_client" 3 | version = "20250331.0.0" 4 | authors = ["Alex.F "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | bytes = { workspace = true } 11 | async-std = { workspace = true } 12 | -------------------------------------------------------------------------------- /socks5_client/examples/udp_echo.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::net::UdpSocket; 3 | 4 | fn main() -> io::Result<()> { 5 | let socket = UdpSocket::bind("0.0.0.0:10240")?; 6 | let mut buf = vec![0; 1440]; 7 | loop { 8 | let (size, addr) = socket.recv_from(&mut buf)?; 9 | println!("{}", String::from_utf8_lossy(&buf[..size])); 10 | socket.send_to(&buf[..size], addr)?; 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /socks5_client/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod tcp; 2 | mod types; 3 | mod udp; 4 | 5 | pub use tcp::Socks5TcpStream; 6 | pub use types::Address; 7 | pub use udp::Socks5UdpSocket; 8 | -------------------------------------------------------------------------------- /socks5_client/src/tcp.rs: -------------------------------------------------------------------------------- 1 | use crate::types::{ 2 | Address, Command, HandshakeRequest, HandshakeResponse, Reply, TcpRequestHeader, 3 | TcpResponseHeader, SOCKS5_AUTH_METHOD_NONE, 4 | }; 5 | use async_std::io::prelude::{Read, Write}; 6 | use async_std::net::{SocketAddr, TcpStream}; 7 | use async_std::task::{Context, Poll}; 8 | use std::io::{Error, ErrorKind, Result}; 9 | use std::pin::Pin; 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct Socks5TcpStream { 13 | conn: TcpStream, 14 | } 15 | 16 | impl Socks5TcpStream { 17 | pub async fn connect(socks5_server: SocketAddr, addr: Address) -> Result { 18 | let mut conn = TcpStream::connect(socks5_server).await?; 19 | let handshake_req = HandshakeRequest::new(vec![SOCKS5_AUTH_METHOD_NONE]); 20 | handshake_req.write_to(&mut conn).await?; 21 | let handshake_resp = HandshakeResponse::read_from(&mut conn).await?; 22 | if handshake_resp.chosen_method != SOCKS5_AUTH_METHOD_NONE { 23 
| return Err(Error::new(ErrorKind::InvalidData, "response methods error")); 24 | } 25 | 26 | let req_header = TcpRequestHeader::new(Command::TcpConnect, addr.clone()); 27 | req_header.write_to(&mut conn).await?; 28 | let resp_header = TcpResponseHeader::read_from(&mut conn).await?; 29 | if resp_header.reply != Reply::Succeeded { 30 | return Err(Error::new( 31 | ErrorKind::InvalidData, 32 | format!("reply error: {:?}", resp_header.reply), 33 | )); 34 | } 35 | 36 | Ok(Socks5TcpStream { conn }) 37 | } 38 | } 39 | 40 | impl Read for Socks5TcpStream { 41 | fn poll_read( 42 | self: Pin<&mut Self>, 43 | cx: &mut Context<'_>, 44 | buf: &mut [u8], 45 | ) -> Poll> { 46 | Pin::new(&mut &self.conn).poll_read(cx, buf) 47 | } 48 | } 49 | 50 | impl Write for Socks5TcpStream { 51 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 52 | Pin::new(&mut &self.conn).poll_write(cx, buf) 53 | } 54 | 55 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 56 | Pin::new(&mut &self.conn).poll_flush(cx) 57 | } 58 | 59 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 60 | Pin::new(&mut &self.conn).poll_close(cx) 61 | } 62 | } 63 | 64 | impl Read for &Socks5TcpStream { 65 | fn poll_read( 66 | self: Pin<&mut Self>, 67 | cx: &mut Context<'_>, 68 | buf: &mut [u8], 69 | ) -> Poll> { 70 | Pin::new(&mut &self.conn).poll_read(cx, buf) 71 | } 72 | } 73 | 74 | impl Write for &Socks5TcpStream { 75 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 76 | Pin::new(&mut &self.conn).poll_write(cx, buf) 77 | } 78 | 79 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 80 | Pin::new(&mut &self.conn).poll_flush(cx) 81 | } 82 | 83 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 84 | Pin::new(&mut &self.conn).poll_close(cx) 85 | } 86 | } 87 | // 88 | // #[cfg(test)] 89 | // mod tests { 90 | // use super::*; 91 | // use async_std::io::prelude::{ReadExt, WriteExt}; 92 | // use async_std::task::block_on; 93 | // 94 | // #[test] 95 | // fn test_req_baidu() -> Result<()> { 96 | // block_on(async { 97 | // let mut conn = Socks5TcpStream::connect( 98 | // "127.0.0.1:1086".parse().unwrap(), 99 | // Address::DomainNameAddress("t.cn".to_string(), 80), 100 | // ) 101 | // .await?; 102 | // conn.write_all(r#"GET / HTTP/1.1\r\nHost: t.cn\r\n\r\n"#.as_bytes()) 103 | // .await?; 104 | // let mut resp = vec![0; 1024]; 105 | // let size = conn.read(&mut resp).await?; 106 | // let resp_text = String::from_utf8_lossy(&resp[..size]).to_string(); 107 | // assert!(resp_text.contains("HTTP/1.1")); 108 | // Ok(()) 109 | // }) 110 | // } 111 | // } 112 | -------------------------------------------------------------------------------- /socks5_client/src/udp.rs: -------------------------------------------------------------------------------- 1 | use crate::types::{ 2 | Address, Command, HandshakeRequest, HandshakeResponse, Reply, TcpRequestHeader, 3 | TcpResponseHeader, UdpAssociateHeader, SOCKS5_AUTH_METHOD_NONE, 4 | }; 5 | use async_std::io; 6 | use async_std::net::{TcpStream, UdpSocket}; 7 | use std::io::{Error, ErrorKind, Result}; 8 | use std::net::SocketAddr; 9 | use std::time::Duration; 10 | 11 | #[derive(Debug)] 12 | pub struct Socks5UdpSocket { 13 | socket: UdpSocket, 14 | #[allow(dead_code)] 15 | associate_conn: TcpStream, 16 | } 17 | 18 | impl Socks5UdpSocket { 19 | pub async fn new(socks5_server: SocketAddr) -> Result { 20 | let socket = UdpSocket::bind("0.0.0.0:0").await?; 21 | let mut conn = 22 | 
io::timeout(Duration::from_secs(1), TcpStream::connect(socks5_server)).await?; 23 | let handshake_req = HandshakeRequest::new(vec![SOCKS5_AUTH_METHOD_NONE]); 24 | handshake_req.write_to(&mut conn).await?; 25 | let handshake_resp = HandshakeResponse::read_from(&mut conn).await?; 26 | if handshake_resp.chosen_method != SOCKS5_AUTH_METHOD_NONE { 27 | return Err(Error::new(ErrorKind::InvalidData, "response methods error")); 28 | } 29 | let req_header = TcpRequestHeader::new( 30 | Command::UdpAssociate, 31 | Address::SocketAddress("0.0.0.0:0".parse().expect("never error")), 32 | ); 33 | req_header.write_to(&mut conn).await?; 34 | let resp_header = TcpResponseHeader::read_from(&mut conn).await?; 35 | if resp_header.reply != Reply::Succeeded { 36 | return Err(Error::new( 37 | ErrorKind::InvalidData, 38 | format!("reply error: {:?}", resp_header.reply), 39 | )); 40 | } 41 | let server_bind_addr = match resp_header.address { 42 | Address::SocketAddress(addr) => addr, 43 | Address::DomainNameAddress(_, _) => { 44 | return Err(Error::new( 45 | ErrorKind::InvalidData, 46 | "invalid udp bind addr, domain is not allowed", 47 | )); 48 | } 49 | }; 50 | socket.connect(server_bind_addr).await?; 51 | Ok(Socks5UdpSocket { 52 | socket, 53 | associate_conn: conn, 54 | }) 55 | } 56 | 57 | pub async fn send_to(&self, buf: &[u8], addr: SocketAddr) -> Result { 58 | let mut buffer = vec![0; 1600]; 59 | let udp_header = UdpAssociateHeader::new(0, Address::SocketAddress(addr)); 60 | let mut size = 0; 61 | udp_header.write_to_buf(&mut buffer[size..].as_mut()); 62 | size += udp_header.serialized_len(); 63 | buffer[size..size + buf.len()].copy_from_slice(buf); 64 | size += buf.len(); 65 | let send_size = self.socket.send(&buffer[..size]).await?; 66 | assert_eq!(send_size, size); 67 | Ok(buf.len()) 68 | } 69 | 70 | pub async fn recv_from(&self, buf: &mut [u8]) -> Result<(usize, SocketAddr)> { 71 | let mut buffer = vec![0; 1500]; 72 | let size = self.socket.recv(&mut buffer).await?; 73 | let udp_header = UdpAssociateHeader::read_from(&mut buffer.as_slice()).await?; 74 | if udp_header.frag != 0 { 75 | return Err(Error::new(ErrorKind::InvalidData, "frag is not allowed")); 76 | } 77 | let udp_header_len = udp_header.serialized_len(); 78 | let addr = udp_header.address; 79 | buf[..size - udp_header_len].copy_from_slice(&buffer[udp_header_len..size]); 80 | let socket_addr = match addr { 81 | Address::SocketAddress(socket_addr) => socket_addr, 82 | Address::DomainNameAddress(_, _) => { 83 | return Err(Error::new(ErrorKind::InvalidData, "invalid addr format")) 84 | } 85 | }; 86 | Ok((size - udp_header_len, socket_addr)) 87 | } 88 | 89 | pub fn local_addr(&self) -> Result { 90 | self.socket.local_addr() 91 | } 92 | } 93 | 94 | // #[cfg(test)] 95 | // mod tests { 96 | // use super::*; 97 | // use async_std::task::block_on; 98 | // use std::str::FromStr; 99 | // 100 | // #[test] 101 | // fn test_udp() -> Result<()> { 102 | // block_on(async { 103 | // let server = "127.0.0.1:1086".parse().unwrap(); 104 | // let udp = Socks5UdpSocket::connect(server).await?; 105 | // let mut buf = vec![0; 1500]; 106 | // let to_addr = Address::SocketAddress("118.145.8.14:10240".parse().unwrap()); 107 | // let size = udp.send_to(b"hello", to_addr.clone()).await?; 108 | // let (s, addr) = udp.recv_from(&mut buf).await?; 109 | // assert_eq!(s, size); 110 | // assert_eq!(addr, to_addr); 111 | // assert_eq!(&buf[..s], b"hello"); 112 | // Ok(()) 113 | // }) 114 | // } 115 | // } 116 | 
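// For reference, the datagrams built by `UdpAssociateHeader` above follow the RFC 1928
// UDP request layout: RSV(2) + FRAG(1) + ATYP(1) + DST.ADDR + DST.PORT, then the raw
// payload. A minimal standalone sketch that builds such a packet for an IPv4 target
// (illustrative only; it does not use the crate's own types):

use std::net::SocketAddrV4;

// RFC 1928 UDP request header for an IPv4 destination, followed by the payload.
fn socks5_udp_packet(dst: SocketAddrV4, payload: &[u8]) -> Vec<u8> {
    let mut pkt = Vec::with_capacity(10 + payload.len());
    pkt.extend_from_slice(&[0x00, 0x00]); // RSV, must be zero
    pkt.push(0x00);                       // FRAG = 0 (no fragmentation)
    pkt.push(0x01);                       // ATYP = IPv4
    pkt.extend_from_slice(&dst.ip().octets());
    pkt.extend_from_slice(&dst.port().to_be_bytes());
    pkt.extend_from_slice(payload);
    pkt
}

fn main() {
    let dst: SocketAddrV4 = "1.2.3.4:443".parse().unwrap();
    let pkt = socks5_udp_packet(dst, b"hello");
    assert_eq!(&pkt[..4], &[0, 0, 0, 1]);
    assert_eq!(pkt.len(), 10 + 5); // 10-byte header + 5-byte payload for IPv4
}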
-------------------------------------------------------------------------------- /ssclient/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ssclient" 3 | version = "20250331.0.0" 4 | authors = ["gfreezy "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | tracing = { workspace = true } 11 | bytes = { workspace = true } 12 | byteorder = { workspace = true } 13 | config = { path = "../config" } 14 | crypto = { path = "../crypto" } 15 | async-std = { workspace = true } 16 | parking_lot = { workspace = true } 17 | tcp_connection = { path = "../tcp_connection" } 18 | 19 | [dev-dependencies] 20 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 21 | -------------------------------------------------------------------------------- /ssclient/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod tcp_io; 2 | mod udp_io; 3 | 4 | const BUFFER_SIZE: usize = 8 * 1024; // 8K buffer 5 | 6 | pub use tcp_io::SSTcpStream; 7 | pub use udp_io::crypto_io::{decrypt_payload, encrypt_payload}; 8 | pub use udp_io::SSUdpSocket; 9 | -------------------------------------------------------------------------------- /ssclient/src/udp_io.rs: -------------------------------------------------------------------------------- 1 | //! UDP relay client 2 | pub mod crypto_io; 3 | 4 | use std::{ 5 | io, 6 | net::{IpAddr, Ipv4Addr, SocketAddr}, 7 | }; 8 | 9 | use bytes::{Bytes, BytesMut}; 10 | use tracing::debug; 11 | 12 | use self::crypto_io::{decrypt_payload, encrypt_payload}; 13 | 14 | use async_std::net::UdpSocket; 15 | use config::Address; 16 | use crypto::CipherType; 17 | 18 | pub const MAXIMUM_UDP_PAYLOAD_SIZE: usize = 1600; 19 | 20 | /// UDP client for communicating with ShadowSocks' server 21 | pub struct SSUdpSocket { 22 | socket: UdpSocket, 23 | method: CipherType, 24 | key: Bytes, 25 | } 26 | 27 | impl SSUdpSocket { 28 | /// Create a client to communicate with Shadowsocks' UDP server 29 | pub async fn new( 30 | server_addr: SocketAddr, 31 | method: CipherType, 32 | key: Bytes, 33 | ) -> io::Result { 34 | let local_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0); 35 | let socket = UdpSocket::bind(local_addr).await?; 36 | socket.connect(server_addr).await?; 37 | 38 | Ok(SSUdpSocket { 39 | socket, 40 | method, 41 | key, 42 | }) 43 | } 44 | 45 | pub fn bind(socket: UdpSocket, method: CipherType, key: Bytes) -> SSUdpSocket { 46 | SSUdpSocket { 47 | socket, 48 | method, 49 | key, 50 | } 51 | } 52 | 53 | /// Send a UDP packet to addr through proxy 54 | pub async fn send_to(&self, payload: &[u8], sock_addr: SocketAddr) -> io::Result { 55 | let addr: Address = sock_addr.into(); 56 | debug!( 57 | "UDP server client send to {}, payload length {} bytes", 58 | addr, 59 | payload.len() 60 | ); 61 | 62 | // CLIENT -> SERVER protocol: ADDRESS + PAYLOAD 63 | let mut send_buf = Vec::with_capacity(addr.serialized_len() + payload.len()); 64 | addr.write_to_buf(&mut send_buf); 65 | send_buf.extend_from_slice(payload); 66 | 67 | let mut encrypt_buf = BytesMut::with_capacity(MAXIMUM_UDP_PAYLOAD_SIZE); 68 | encrypt_payload(self.method, &self.key, &send_buf, &mut encrypt_buf)?; 69 | 70 | let send_len = self.socket.send(&encrypt_buf[..]).await?; 71 | 72 | assert_eq!(encrypt_buf.len(), send_len); 73 | 74 | Ok(payload.len()) 75 | } 76 | 77 | /// Receive packet from Shadowsocks' UDP server 78 | 
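    ///
    /// The server's reply mirrors the request: a target `Address` followed by the payload,
    /// encrypted as one datagram. `decrypt_payload` strips the salt/IV (and AEAD tag), the
    /// leading address is then parsed off, and only the remaining payload is copied into `buf`.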
pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { 79 | // Waiting for response from server SERVER -> CLIENT 80 | let mut recv_buf = [0u8; MAXIMUM_UDP_PAYLOAD_SIZE]; 81 | 82 | let recv_n = self.socket.recv(&mut recv_buf).await?; 83 | let mut decrypt_buf = BytesMut::with_capacity(MAXIMUM_UDP_PAYLOAD_SIZE); 84 | 85 | let decrypt_size = decrypt_payload( 86 | self.method, 87 | &self.key, 88 | &recv_buf[..recv_n], 89 | &mut decrypt_buf, 90 | )?; 91 | let addr = Address::read_from(&mut decrypt_buf.as_ref()).await?; 92 | let payload = &decrypt_buf[addr.serialized_len()..decrypt_size]; 93 | buf[..payload.len()].copy_from_slice(payload); 94 | 95 | debug!( 96 | "UDP server client recv_from {}, payload length {} bytes", 97 | addr, 98 | payload.len() 99 | ); 100 | 101 | let sock_addr = match addr { 102 | Address::SocketAddress(s) => s, 103 | Address::DomainNameAddress(_, _) => { 104 | return Err(io::Error::new( 105 | io::ErrorKind::InvalidData, 106 | "invalid addr format", 107 | )) 108 | } 109 | }; 110 | Ok((payload.len(), sock_addr)) 111 | } 112 | } 113 | 114 | #[cfg(test)] 115 | mod tests { 116 | use super::*; 117 | use async_std::task::{block_on, sleep, spawn}; 118 | use std::net::ToSocketAddrs; 119 | use std::time::Duration; 120 | 121 | #[test] 122 | fn test_read_write() { 123 | let method = CipherType::ChaCha20Ietf; 124 | let password = "GwEU01uXWm0Pp6t08"; 125 | let key = method.bytes_to_key(password.as_bytes()); 126 | let server = "127.0.0.1:14188".to_socket_addrs().unwrap().next().unwrap(); 127 | let data = b"GET / HTTP/1.1\r\n\r\n"; 128 | let addr = "127.0.0.1:443".parse().unwrap(); 129 | block_on(async { 130 | let key_clone = key.clone(); 131 | let h = spawn(async move { 132 | let u = UdpSocket::bind("0.0.0.0:14188").await.unwrap(); 133 | let udp = SSUdpSocket::bind(u, method, key_clone); 134 | let mut b = vec![0; 1024]; 135 | let (s, _) = udp.recv_from(&mut b).await.unwrap(); 136 | assert_eq!(&b[..s], data); 137 | }); 138 | sleep(Duration::from_secs(1)).await; 139 | let udp = SSUdpSocket::new(server, method, key).await.unwrap(); 140 | udp.send_to(data, addr).await.unwrap(); 141 | h.await; 142 | }); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /ssclient/src/udp_io/crypto_io.rs: -------------------------------------------------------------------------------- 1 | //! Crypto protocol for ShadowSocks UDP 2 | //! 3 | //! Payload with stream cipher 4 | //! ```plain 5 | //! +-------+----------+ 6 | //! | IV | Payload | 7 | //! +-------+----------+ 8 | //! | Fixed | Variable | 9 | //! +-------+----------+ 10 | //! ``` 11 | //! 12 | //! Payload with AEAD cipher 13 | //! 14 | //! ```plain 15 | //! UDP (after encryption, *ciphertext*) 16 | //! +--------+-----------+-----------+ 17 | //! | NONCE | *Data* | Data_TAG | 18 | //! +--------+-----------+-----------+ 19 | //! | Fixed | Variable | Fixed | 20 | //! +--------+-----------+-----------+ 21 | //! 
``` 22 | 23 | use std::io::{Error, ErrorKind, Result}; 24 | 25 | use bytes::{BufMut, BytesMut}; 26 | use crypto::{CipherCategory, CipherType, CryptoMode}; 27 | 28 | /// Encrypt payload into ShadowSocks UDP encrypted packet 29 | pub fn encrypt_payload( 30 | t: CipherType, 31 | key: &[u8], 32 | payload: &[u8], 33 | output: &mut BytesMut, 34 | ) -> Result { 35 | match t.category() { 36 | CipherCategory::Stream => encrypt_payload_stream(t, key, payload, output), 37 | CipherCategory::Aead => encrypt_payload_aead(t, key, payload, output), 38 | } 39 | } 40 | 41 | /// Decrypt payload from ShadowSocks UDP encrypted packet 42 | pub fn decrypt_payload( 43 | t: CipherType, 44 | key: &[u8], 45 | payload: &[u8], 46 | output: &mut BytesMut, 47 | ) -> Result { 48 | match t.category() { 49 | CipherCategory::Stream => decrypt_payload_stream(t, key, payload, output), 50 | CipherCategory::Aead => decrypt_payload_aead(t, key, payload, output), 51 | } 52 | } 53 | 54 | #[allow(clippy::unnecessary_wraps)] 55 | fn encrypt_payload_aead( 56 | t: CipherType, 57 | key: &[u8], 58 | payload: &[u8], 59 | output: &mut BytesMut, 60 | ) -> Result { 61 | let salt = t.gen_salt(); 62 | let tag_size = t.tag_size(); 63 | let mut cipher = crypto::new_aead_encryptor(t, key, &salt); 64 | 65 | let salt_len = salt.len(); 66 | output.put_slice(&salt); 67 | output.resize(salt_len + payload.len() + tag_size, 0); 68 | 69 | cipher.encrypt( 70 | payload, 71 | &mut output[salt_len..salt_len + payload.len() + tag_size], 72 | ); 73 | 74 | Ok(salt_len + payload.len() + tag_size) 75 | } 76 | 77 | fn decrypt_payload_aead( 78 | t: CipherType, 79 | key: &[u8], 80 | payload: &[u8], 81 | output: &mut BytesMut, 82 | ) -> Result { 83 | let tag_size = t.tag_size(); 84 | let salt_size = t.salt_size(); 85 | 86 | if payload.len() < tag_size + salt_size { 87 | let err = Error::new(ErrorKind::UnexpectedEof, "udp packet too short"); 88 | return Err(err); 89 | } 90 | 91 | let salt = &payload[..salt_size]; 92 | let data = &payload[salt_size..]; 93 | let data_length = payload.len() - tag_size - salt_size; 94 | 95 | let mut cipher = crypto::new_aead_decryptor(t, key, salt); 96 | 97 | output.resize(data_length, 0); 98 | cipher.decrypt(data, &mut output[..data_length])?; 99 | 100 | Ok(data_length) 101 | } 102 | 103 | fn encrypt_payload_stream( 104 | t: CipherType, 105 | key: &[u8], 106 | payload: &[u8], 107 | output: &mut BytesMut, 108 | ) -> Result { 109 | let iv = t.gen_init_vec(); 110 | let mut cipher = crypto::new_stream(t, key, &iv, CryptoMode::Encrypt); 111 | 112 | output.put_slice(&iv); 113 | cipher.update(payload, output)?; 114 | cipher.finalize(output)?; 115 | Ok(payload.len() + iv.len()) 116 | } 117 | 118 | fn decrypt_payload_stream( 119 | t: CipherType, 120 | key: &[u8], 121 | payload: &[u8], 122 | output: &mut BytesMut, 123 | ) -> Result { 124 | let iv_size = t.iv_size(); 125 | 126 | let iv = &payload[..iv_size]; 127 | let data = &payload[iv_size..]; 128 | 129 | let mut cipher = crypto::new_stream(t, key, iv, CryptoMode::Decrypt); 130 | 131 | cipher.update(data, output)?; 132 | cipher.finalize(output)?; 133 | 134 | Ok(data.len()) 135 | } 136 | 137 | #[cfg(test)] 138 | mod tests { 139 | use super::*; 140 | use crate::udp_io::MAXIMUM_UDP_PAYLOAD_SIZE; 141 | 142 | #[test] 143 | fn test_encrypt_and_decrypt_payload_aead() { 144 | let cipher_type = CipherType::XChaCha20IetfPoly1305; 145 | let key = cipher_type.bytes_to_key(b"key"); 146 | let payload = b"payload"; 147 | let mut output = BytesMut::with_capacity(MAXIMUM_UDP_PAYLOAD_SIZE); 148 | let mut output2 
= BytesMut::with_capacity(MAXIMUM_UDP_PAYLOAD_SIZE); 149 | let size = encrypt_payload_aead(cipher_type, &key, payload, &mut output).unwrap(); 150 | let size2 = decrypt_payload_aead(cipher_type, &key, &output[..size], &mut output2).unwrap(); 151 | assert_eq!(&output2[..size2], payload); 152 | } 153 | 154 | #[test] 155 | fn test_encrypt_and_decrypt_payload_stream() { 156 | let cipher_type = CipherType::ChaCha20Ietf; 157 | let key = cipher_type.bytes_to_key(b"key"); 158 | let payload = b"payload"; 159 | let mut output = BytesMut::with_capacity(MAXIMUM_UDP_PAYLOAD_SIZE); 160 | let mut output2 = BytesMut::with_capacity(MAXIMUM_UDP_PAYLOAD_SIZE); 161 | let size = encrypt_payload_stream(cipher_type, &key, payload, &mut output).unwrap(); 162 | let size2 = 163 | decrypt_payload_stream(cipher_type, &key, &output[..size], &mut output2).unwrap(); 164 | assert_eq!( 165 | std::str::from_utf8(&output2[..size2]), 166 | std::str::from_utf8(payload) 167 | ); 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "store" 3 | version = "20250331.0.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | rusqlite = { workspace = true, features = ["bundled"] } 10 | tracing = { workspace = true } 11 | anyhow = { workspace = true, features = ["backtrace"] } 12 | once_cell = { workspace = true } 13 | parking_lot = { workspace = true } 14 | cfg-if = { workspace = true } 15 | -------------------------------------------------------------------------------- /store/src/config.rs: -------------------------------------------------------------------------------- 1 | use crate::{now, Store}; 2 | use anyhow::Result; 3 | 4 | const CONFIG_REMOTE_SERVERS_CACHE_TTL: u64 = 60 * 60 * 24 * 90; // 90 days 5 | 6 | // region: config 7 | impl Store { 8 | pub fn get_cached_remote_config_data(&self, remote_url: &str) -> Result>> { 9 | let conn = self.conn.lock(); 10 | let mut stmt = conn.prepare_cached(&format!( 11 | r#"SELECT * FROM {} WHERE url = ?"#, 12 | Self::TABLE_REMOTE_CONFIG_CACHE 13 | ))?; 14 | let ret = stmt.query_row((remote_url,), |row| { 15 | Ok(( 16 | row.get::<_, Vec>("data")?, 17 | row.get::<_, u64>("last_update")?, 18 | )) 19 | }); 20 | let (data, last_update) = match ret { 21 | Ok(value) => value, 22 | Err(rusqlite::Error::QueryReturnedNoRows) => return Ok(None), 23 | Err(e) => return Err(e.into()), 24 | }; 25 | if last_update + CONFIG_REMOTE_SERVERS_CACHE_TTL < now() { 26 | self.delete_cached_data(remote_url)?; 27 | return Ok(None); 28 | } 29 | Ok(Some(data)) 30 | } 31 | 32 | pub fn cache_remote_config_data(&self, url: &str, data: &[u8]) -> Result<()> { 33 | let conn = self.conn.lock(); 34 | let mut stmt = conn.prepare_cached(&format!( 35 | r#"INSERT OR REPLACE INTO {} (url, data, last_update) VALUES (?, ?, ?)"#, 36 | Self::TABLE_REMOTE_CONFIG_CACHE 37 | ))?; 38 | let affected = stmt.execute((url, data, &now()))?; 39 | assert_eq!(affected, 1); 40 | self.delete_expired_data()?; 41 | Ok(()) 42 | } 43 | 44 | fn delete_cached_data(&self, url: &str) -> Result<()> { 45 | let conn = self.conn.lock(); 46 | let mut stmt = conn.prepare_cached(&format!( 47 | r#"DELETE FROM {} WHERE url = ?"#, 48 | Self::TABLE_REMOTE_CONFIG_CACHE 49 | ))?; 50 | let _affected = stmt.execute((url,))?; 51 | Ok(()) 52 | } 53 | 54 | fn delete_expired_data(&self) -> Result<()> { 55 | 
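        // Purge every cached remote config whose `last_update` is older than
        // CONFIG_REMOTE_SERVERS_CACHE_TTL (90 days); called after each successful insert
        // in `cache_remote_config_data` above.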
let conn = self.conn.lock(); 56 | let mut stmt = conn.prepare_cached(&format!( 57 | r#"DELETE FROM {} WHERE last_update < ?"#, 58 | Self::TABLE_REMOTE_CONFIG_CACHE 59 | ))?; 60 | let _affected = stmt.execute([(now() - CONFIG_REMOTE_SERVERS_CACHE_TTL)])?; 61 | Ok(()) 62 | } 63 | } 64 | // endregion: config 65 | 66 | #[cfg(test)] 67 | mod tests { 68 | use super::*; 69 | 70 | #[test] 71 | fn test_cache_remote_config_data() -> Result<()> { 72 | let initial_ip = "168.0.0.1".parse().unwrap(); 73 | let store = Store::new_in_memory(initial_ip)?; 74 | let data = store.get_cached_remote_config_data("https://www.baidu.com")?; 75 | assert!(data.is_none()); 76 | let data = b"hello".to_vec(); 77 | store.cache_remote_config_data("https://www.baidu.com", &data)?; 78 | let data2 = store.get_cached_remote_config_data("https://www.baidu.com")?; 79 | assert_eq!(data2, Some(data)); 80 | Ok(()) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /store/src/dns.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use std::net::Ipv4Addr; 3 | 4 | use crate::Store; 5 | 6 | // region: host and ip mapping 7 | impl Store { 8 | pub fn get_host_by_ipv4(&self, ip: Ipv4Addr) -> Result> { 9 | let conn = self.conn.lock(); 10 | let mut stmt = conn.prepare_cached(&format!( 11 | r#"SELECT host FROM {} WHERE ip = ?"#, 12 | Self::TABLE_HOST_IP 13 | ))?; 14 | let ret = stmt.query_row([Into::::into(ip)], |row| row.get::<_, String>("host")); 15 | match ret { 16 | Ok(host) => Ok(Some(host)), 17 | Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), 18 | Err(e) => Err(e.into()), 19 | } 20 | } 21 | 22 | pub fn get_ipv4_by_host(&self, host: &str) -> Result { 23 | let conn = self.conn.lock(); 24 | let mut stmt = conn.prepare_cached(&format!( 25 | r#"SELECT ip FROM {} WHERE host = ?"#, 26 | Self::TABLE_HOST_IP 27 | ))?; 28 | match stmt.query_row((host,), |row| row.get::<_, u32>("ip")) { 29 | Ok(v) => Ok(Ipv4Addr::from(v)), 30 | Err(rusqlite::Error::QueryReturnedNoRows) => { 31 | let next_ip = self.next_ip()?; 32 | self.associate_ipv4_and_host(next_ip, host)?; 33 | Ok(next_ip) 34 | } 35 | Err(e) => Err(e.into()), 36 | } 37 | } 38 | 39 | fn next_ip(&self) -> Result { 40 | let conn = self.conn.lock(); 41 | let mut stmt = conn.prepare_cached(&format!( 42 | r#"SELECT MAX(ip) AS ip FROM {}"#, 43 | Self::TABLE_HOST_IP 44 | ))?; 45 | match stmt.query_row((), |row| row.get::<_, u32>("ip")) { 46 | Ok(v) => Ok(Ipv4Addr::from(v.checked_add(1).expect("ip addr overflow"))), 47 | Err(e) => Err(e.into()), 48 | } 49 | } 50 | 51 | fn associate_ipv4_and_host(&self, ip: Ipv4Addr, host: &str) -> Result<()> { 52 | let conn = self.conn.lock(); 53 | let mut stmt = conn.prepare_cached(&format!( 54 | r#"INSERT INTO {} (ip, host) VALUES (?, ?)"#, 55 | Self::TABLE_HOST_IP 56 | ))?; 57 | let affected = stmt.execute((Into::::into(ip), host))?; 58 | assert_eq!(affected, 1); 59 | Ok(()) 60 | } 61 | } 62 | // endregion: host and ip mapping 63 | 64 | #[cfg(test)] 65 | mod tests { 66 | use super::*; 67 | 68 | #[test] 69 | fn test_get_ipv4_by_host() -> Result<()> { 70 | let initial_ip = "168.0.0.1".parse().unwrap(); 71 | let store = Store::new_in_memory(initial_ip)?; 72 | let baidu_domain = "www.baidu.com"; 73 | let baidu_ip = store.get_ipv4_by_host(baidu_domain)?; 74 | assert_eq!(baidu_ip, Ipv4Addr::from(Into::::into(initial_ip))); 75 | assert_eq!( 76 | store.get_host_by_ipv4(baidu_ip)?, 77 | Some(baidu_domain.to_string()) 78 | ); 79 | Ok(()) 80 | } 81 | } 82 | 
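// The host/IP table above is a simple fake-IP allocator: the first host after start-up
// gets the configured initial address, and every new host gets `MAX(ip) + 1`, relying on
// the u32 <-> Ipv4Addr conversion. A standalone, in-memory sketch of the same idea
// (illustrative only; the real store persists the mapping in SQLite):

use std::collections::HashMap;
use std::net::Ipv4Addr;

struct FakeIpPool {
    next: u32,
    by_host: HashMap<String, Ipv4Addr>,
    by_ip: HashMap<Ipv4Addr, String>,
}

impl FakeIpPool {
    fn new(initial: Ipv4Addr) -> Self {
        Self { next: u32::from(initial), by_host: HashMap::new(), by_ip: HashMap::new() }
    }

    // Return the existing mapping, or allocate the next address in sequence.
    fn ip_for_host(&mut self, host: &str) -> Ipv4Addr {
        if let Some(ip) = self.by_host.get(host) {
            return *ip;
        }
        let ip = Ipv4Addr::from(self.next);
        self.next = self.next.checked_add(1).expect("ip addr overflow");
        self.by_host.insert(host.to_string(), ip);
        self.by_ip.insert(ip, host.to_string());
        ip
    }
}

fn main() {
    let mut pool = FakeIpPool::new("10.0.0.1".parse().unwrap());
    assert_eq!(pool.ip_for_host("example.com"), "10.0.0.1".parse::<Ipv4Addr>().unwrap());
    assert_eq!(pool.ip_for_host("example.org"), "10.0.0.2".parse::<Ipv4Addr>().unwrap());
    assert_eq!(pool.ip_for_host("example.com"), "10.0.0.1".parse::<Ipv4Addr>().unwrap());
}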
-------------------------------------------------------------------------------- /store/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod config; 2 | mod connections; 3 | mod dns; 4 | 5 | use parking_lot::ReentrantMutex; 6 | use std::collections::HashMap; 7 | use std::net::Ipv4Addr; 8 | use std::path::{Path, PathBuf}; 9 | use std::sync::atomic::AtomicU32; 10 | use std::time::{SystemTime, UNIX_EPOCH}; 11 | 12 | use anyhow::Result; 13 | use once_cell::sync::OnceCell; 14 | use parking_lot::RwLock; 15 | use rusqlite::Connection; 16 | 17 | #[derive(Debug)] 18 | pub struct Store { 19 | conn: ReentrantMutex, 20 | initial_ip: Ipv4Addr, 21 | db_path: PathBuf, 22 | cache_stats: RwLock>, 23 | last_flush_ts: AtomicU32, 24 | } 25 | 26 | static INSTANCE: OnceCell = OnceCell::new(); 27 | 28 | impl Clone for Store { 29 | fn clone(&self) -> Self { 30 | Self { 31 | conn: ReentrantMutex::new(Connection::open(&self.db_path).expect("open db")), 32 | initial_ip: self.initial_ip, 33 | db_path: self.db_path.clone(), 34 | cache_stats: RwLock::new(HashMap::new()), 35 | last_flush_ts: AtomicU32::new(0), 36 | } 37 | } 38 | } 39 | 40 | impl Store { 41 | const TABLE_HOST_IP: &'static str = "host_ip"; 42 | const TABLE_REMOTE_CONFIG_CACHE: &'static str = "remote_config_cache"; 43 | const TABLE_CONNECTIONS: &'static str = "connections"; 44 | 45 | pub fn setup_global(path: impl AsRef, initial_ip: Ipv4Addr) { 46 | let _ = INSTANCE.get_or_init(|| Store::new(path, initial_ip).expect("init store")); 47 | } 48 | 49 | pub fn setup_global_for_test() { 50 | let _ = INSTANCE 51 | .get_or_init(|| Store::new_in_memory("10.0.0.1".parse().unwrap()).expect("init store")); 52 | } 53 | 54 | pub fn global() -> &'static Self { 55 | INSTANCE.get().expect("global store is not initialized") 56 | } 57 | 58 | pub fn new(db_path: impl AsRef, initial_ip: Ipv4Addr) -> Result { 59 | let path = db_path.as_ref().to_path_buf(); 60 | let conn = match Connection::open(&path) { 61 | Ok(conn) => conn, 62 | Err(e) => { 63 | eprintln!( 64 | "Open db `{:?}` error: {}.\nDelete and reinitialize db", 65 | &path, e 66 | ); 67 | std::fs::remove_dir_all(&path)?; 68 | Connection::open(&path)? 
69 | } 70 | }; 71 | conn.pragma_update(None, "journal_mode", "WAL") 72 | .expect("set journal_mode"); 73 | conn.pragma_update(None, "synchronous", "off") 74 | .expect("set synchronous"); 75 | conn.pragma_update(None, "temp_store", "memory") 76 | .expect("set temp_store"); 77 | let store = Store { 78 | db_path: path, 79 | conn: ReentrantMutex::new(conn), 80 | initial_ip, 81 | cache_stats: RwLock::new(HashMap::new()), 82 | last_flush_ts: AtomicU32::new(0), 83 | }; 84 | store.init_tables()?; 85 | Ok(store) 86 | } 87 | 88 | #[cfg(test)] 89 | pub fn store_for_test() -> Self { 90 | Store::new_in_memory(Ipv4Addr::new(127, 0, 0, 1)).expect("init store") 91 | } 92 | 93 | pub fn new_in_memory(initial_ip: Ipv4Addr) -> Result { 94 | let conn = Connection::open_in_memory()?; 95 | let store = Store { 96 | db_path: PathBuf::new(), 97 | conn: ReentrantMutex::new(conn), 98 | initial_ip, 99 | cache_stats: RwLock::new(HashMap::new()), 100 | last_flush_ts: AtomicU32::new(0), 101 | }; 102 | store.init_tables()?; 103 | Ok(store) 104 | } 105 | 106 | fn init_tables(&self) -> Result<()> { 107 | let conn = self.conn.lock(); 108 | let _ = conn.execute( 109 | &format!( 110 | r#" 111 | CREATE TABLE IF NOT EXISTS {} ( 112 | ip INTEGER PRIMARY KEY, 113 | host TEXT NOT NULL UNIQUE 114 | ) 115 | "#, 116 | Self::TABLE_HOST_IP, 117 | ), 118 | (), 119 | )?; 120 | 121 | // region: remote_config_cache 122 | conn.execute_batch(&format!( 123 | r#" 124 | CREATE TABLE IF NOT EXISTS {table} ( 125 | id INTEGER PRIMARY KEY AUTOINCREMENT, 126 | url TEXT NOT NULL UNIQUE, 127 | data BLOB NOT NULL, 128 | last_update INTEGER NOT NULL 129 | ); 130 | CREATE INDEX IF NOT EXISTS {table}_last_update ON {table} (last_update); 131 | "#, 132 | table = Self::TABLE_REMOTE_CONFIG_CACHE, 133 | ))?; 134 | let mut stmt = conn.prepare_cached(&format!( 135 | r#"INSERT OR IGNORE INTO {} (ip, host) VALUES (?, ?)"#, 136 | Self::TABLE_HOST_IP 137 | ))?; 138 | let ip_num: u32 = self.initial_ip.into(); 139 | assert!(ip_num > 1, "initial ip should be greater than 1"); 140 | let prev: Ipv4Addr = (ip_num - 1).into(); 141 | let _ = stmt.execute((Into::::into(prev), ""))?; 142 | // endregion: remote_config_cache 143 | 144 | // region: connections 145 | // | id | host | network | type | recv_bytes | sent_bytes | proxy_server | connect_time | last_update | is_alive | 146 | // connection data is cleared whenever the process starts. 
147 | conn.execute_batch(&format!( 148 | r#" 149 | DROP TABLE IF EXISTS {table}; 150 | CREATE TABLE IF NOT EXISTS {table} ( 151 | id INTEGER PRIMARY KEY, 152 | host TEXT NOT NULL, 153 | network TEXT NOT NULL, 154 | type TEXT NOT NULL, 155 | recv_bytes INTEGER NOT NULL, 156 | sent_bytes INTEGER NOT NULL, 157 | proxy_server TEXT NOT NULL, 158 | connect_time INTEGER NOT NULL, 159 | last_update INTEGER NOT NULL, 160 | is_alive INTEGER NOT NULL 161 | ); 162 | "#, 163 | table = Self::TABLE_CONNECTIONS, 164 | ))?; 165 | // endregion: connections 166 | Ok(()) 167 | } 168 | } 169 | 170 | pub fn now() -> u64 { 171 | SystemTime::now() 172 | .duration_since(UNIX_EPOCH) 173 | .unwrap() 174 | .as_secs() 175 | } 176 | -------------------------------------------------------------------------------- /sysconfig/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sysconfig" 3 | version = "20250331.0.0" 4 | authors = ["gfreezy "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | [dependencies] 9 | tracing = { workspace = true } 10 | libc = { workspace = true } 11 | 12 | [target.'cfg(target_os="macos")'.dependencies] 13 | libproc = { git = "https://github.com/gfreezy/libproc-rs", rev = "bffc2c4" } 14 | 15 | [target.'cfg(target_os="linux")'.dependencies] 16 | procfs = "0.14.1" 17 | -------------------------------------------------------------------------------- /sysconfig/src/command.rs: -------------------------------------------------------------------------------- 1 | use std::process::Command; 2 | use tracing::info; 3 | 4 | pub fn run_cmd(cmd: &str, args: &[&str]) -> String { 5 | let output = Command::new(cmd) 6 | .args(args) 7 | .output() 8 | .unwrap_or_else(|_| panic!("run cmd failed: {cmd}, args: {args:?}")); 9 | let stdout = std::str::from_utf8(&output.stdout).expect("utf8"); 10 | let stderr = std::str::from_utf8(&output.stderr).expect("utf8"); 11 | info!("cmd: {cmd}, args: {:?}", args); 12 | info!("stdout: {}", stdout); 13 | info!("stderr: {}", stderr); 14 | 15 | if !output.status.success() { 16 | panic!( 17 | "{} {}\nstdout: {}\nstderr: {}", 18 | cmd, 19 | args.join(" "), 20 | stdout, 21 | stderr 22 | ); 23 | } 24 | stdout.to_string() 25 | } 26 | -------------------------------------------------------------------------------- /sysconfig/src/iptables.rs: -------------------------------------------------------------------------------- 1 | use crate::command::run_cmd; 2 | 3 | pub struct IptablesSetup { 4 | port: u16, 5 | cidr: String, 6 | } 7 | 8 | impl IptablesSetup { 9 | pub fn new(port: u16, cidr: String) -> Self { 10 | IptablesSetup { port, cidr } 11 | } 12 | 13 | pub fn start(&self) { 14 | setup_redirect_iptables(&self.cidr, self.port); 15 | } 16 | } 17 | 18 | impl Drop for IptablesSetup { 19 | fn drop(&mut self) { 20 | teardown_redirect_iptables(&self.cidr, self.port); 21 | } 22 | } 23 | 24 | fn teardown_redirect_iptables(cidr: &str, port: u16) { 25 | let _ = run_cmd( 26 | "iptables", 27 | &[ 28 | "-t", 29 | "nat", 30 | "-D", 31 | "PREROUTING", 32 | "-d", 33 | cidr, 34 | "-p", 35 | "tcp", 36 | "-j", 37 | "REDIRECT", 38 | "--to-ports", 39 | &port.to_string(), 40 | ], 41 | ); 42 | } 43 | 44 | fn setup_redirect_iptables(cidr: &str, port: u16) { 45 | let _ = run_cmd( 46 | "iptables", 47 | &[ 48 | "-t", 49 | "nat", 50 | "-A", 51 | "PREROUTING", 52 | "-d", 53 | cidr, 54 | "-p", 55 | "tcp", 56 | "-j", 57 | "REDIRECT", 58 | "--to-ports", 59 | &port.to_string(), 
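            // Equivalent shell command:
            //   iptables -t nat -A PREROUTING -d <cidr> -p tcp -j REDIRECT --to-ports <port>
            // (teardown_redirect_iptables above removes the same rule with -D when the
            // setup guard is dropped)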
60 |         ],
61 |     );
62 | }
63 | 
--------------------------------------------------------------------------------
/sysconfig/src/lib.rs:
--------------------------------------------------------------------------------
1 | mod command;
2 | mod iptables;
3 | mod net;
4 | #[cfg(target_arch = "x86_64")]
5 | mod proc;
6 | mod ulimit;
7 | 
8 | pub use iptables::IptablesSetup;
9 | pub use net::{get_current_dns, setup_ip, DNSSetup, IpForward};
10 | #[cfg(target_arch = "x86_64")]
11 | pub use proc::sys::{list_system_proc_socks, list_user_proc_socks};
12 | #[cfg(target_arch = "x86_64")]
13 | pub use proc::SocketInfo;
14 | pub use ulimit::{get_rlimit_no_file, set_rlimit_no_file};
15 | 
--------------------------------------------------------------------------------
/sysconfig/src/net/darwin.rs:
--------------------------------------------------------------------------------
1 | use crate::command::run_cmd;
2 | use std::net::IpAddr;
3 | use tracing::info;
4 | 
5 | pub struct DNSSetup {
6 |     primary_network: String,
7 |     // DNS servers from scutil. The DNS servers actually in use.
8 |     original_real_dns: Vec<String>,
9 |     // DNS servers from networksetup. DHCP DNS servers are not included.
10 |     original_manual_dns: Vec<String>,
11 |     // DNS servers to be set.
12 |     dns: Vec<String>,
13 | }
14 | 
15 | impl DNSSetup {
16 |     #[allow(clippy::new_without_default)]
17 |     pub fn new(dns: Vec<String>) -> Self {
18 |         let network = get_primary_network();
19 |         info!("Primary network service is {}", &network);
20 |         let original_manual_dns = run_cmd("networksetup", &["-getdnsservers", &network])
21 |             .lines()
22 |             .filter_map(|l| l.parse::<IpAddr>().ok())
23 |             .map(|ip| ip.to_string())
24 |             .collect::<Vec<_>>();
25 | 
26 |         // Get macOS DNS servers from scutil
27 |         let original_dns = get_current_dns();
28 | 
29 |         DNSSetup {
30 |             primary_network: network,
31 |             original_real_dns: original_dns,
32 |             original_manual_dns,
33 |             dns,
34 |         }
35 |     }
36 | 
37 |     pub fn start(&self) {
38 |         let original_dns = &self.original_manual_dns;
39 |         let network = &self.primary_network;
40 |         if self.dns.is_empty() {
41 |             let _ = run_cmd("networksetup", &["-setdnsservers", network, "127.0.0.1"]);
42 |         } else {
43 |             let to_set = self.dns.join(" ");
44 |             let _ = run_cmd(
45 |                 "networksetup",
46 |                 &["-setdnsservers", network, "127.0.0.1", &to_set],
47 |             );
48 |         }
49 | 
50 |         info!(
51 |             "Setup DNS: {:?}, Original DNS is {:?}, Original real DNS is {:?}",
52 |             &self.dns, &self.original_manual_dns, &original_dns,
53 |         );
54 |     }
55 | 
56 |     pub fn original_dns(&self) -> Vec<String> {
57 |         self.original_real_dns.clone()
58 |     }
59 | }
60 | 
61 | impl Drop for DNSSetup {
62 |     fn drop(&mut self) {
63 |         let mut args = vec!["-setdnsservers", &self.primary_network];
64 |         if self.original_manual_dns.is_empty() {
65 |             args.push("empty");
66 |         } else {
67 |             for dns in &self.original_manual_dns {
68 |                 args.push(dns);
69 |             }
70 |         };
71 |         info!("Restore original DNS: {:?}", self.original_manual_dns);
72 | 
73 |         let _ = run_cmd("networksetup", &args);
74 |     }
75 | }
76 | 
77 | pub fn setup_ip(tun_name: &str, ip: &str, cidr: &str, additional_cidrs: Vec<String>) {
78 |     let _ = run_cmd("ifconfig", &[tun_name, ip, ip]);
79 |     let _ = run_cmd("route", &["add", cidr, ip]);
80 |     for additional_cidr in additional_cidrs {
81 |         let _ = run_cmd("route", &["add", additional_cidr.as_str(), ip]);
82 |     }
83 | }
84 | 
85 | fn get_primary_network() -> String {
86 |     let route_ret = run_cmd("route", &["-n", "get", "0.0.0.0"]);
87 |     let device = route_ret
88 |         .lines()
89 |         .find(|l| l.contains("interface:"))
90 |         .and_then(|l| l.split_whitespace().last())
91 |         .map(|s| s.trim())
92 |         .expect("get primary device");
93 |     info!("Primary device is {}", device);
94 |     let network_services = run_cmd("networksetup", &["-listallhardwareports"]);
95 |     let mut iter = network_services.lines().peekable();
96 |     loop {
97 |         if let Some(line) = iter.next() {
98 |             if let Some(next_line) = iter.peek() {
99 |                 if next_line.split(':').next_back().map(|l| l.contains(device)) == Some(true) {
100 |                     if let Some(network) = line.split(':').next_back().map(|s| s.trim()) {
101 |                         return network.to_string();
102 |                     }
103 |                 }
104 |             } else {
105 |                 panic!("No primary network found");
106 |             }
107 |         } else {
108 |             panic!("No primary network found");
109 |         }
110 |     }
111 | }
112 | 
113 | pub fn get_current_dns() -> Vec<String> {
114 |     // Get macOS DNS servers from scutil
115 |     let lines = run_cmd("scutil", &["--dns"]);
116 |     let original_dns = parse_scutil_dns(&lines);
117 |     info!("Original DNS is {:?}", original_dns);
118 |     original_dns
119 | }
120 | 
121 | fn parse_scutil_dns(lines: &str) -> Vec<String> {
122 |     let mut dns: Vec<String> = vec![];
123 |     for l in lines.lines() {
124 |         if !l.trim().starts_with("nameserver[") {
125 |             continue;
126 |         }
127 |         let Some(ip) = l.split(':').nth(1) else {
128 |             continue;
129 |         };
130 |         let Ok(ip) = ip.trim().parse::<IpAddr>() else {
131 |             continue;
132 |         };
133 |         let ip = ip.to_string();
134 |         if !dns.contains(&ip) {
135 |             dns.push(ip);
136 |         }
137 |     }
138 |     dns
139 | }
140 | 
141 | #[cfg(test)]
142 | mod tests {
143 |     use super::*;
144 | 
145 |     #[test]
146 |     fn test_parse_scutil_dns() {
147 |         let lines = r#"DNS configuration
148 | 
149 | resolver #1
150 | nameserver[0] : 192.168.2.1
151 | if_index : 15 (en0)
152 | flags : Request A records
153 | reach : 0x00020002 (Reachable,Directly Reachable Address)
154 | 
155 | resolver #2
156 | domain : local
157 | options : mdns
158 | timeout : 5
159 | flags : Request A records
160 | reach : 0x00000000 (Not Reachable)
161 | order : 300000
162 | 
163 | resolver #3
164 | domain : 254.169.in-addr.arpa
165 | options : mdns
166 | timeout : 5
167 | flags : Request A records
168 | reach : 0x00000000 (Not Reachable)
169 | order : 300200
170 | 
171 | resolver #4
172 | domain : 8.e.f.ip6.arpa
173 | options : mdns
174 | timeout : 5
175 | flags : Request A records
176 | reach : 0x00000000 (Not Reachable)
177 | order : 300400
178 | 
179 | resolver #5
180 | domain : 9.e.f.ip6.arpa
181 | options : mdns
182 | timeout : 5
183 | flags : Request A records
184 | reach : 0x00000000 (Not Reachable)
185 | order : 300600
186 | 
187 | resolver #6
188 | domain : a.e.f.ip6.arpa
189 | options : mdns
190 | timeout : 5
191 | flags : Request A records
192 | reach : 0x00000000 (Not Reachable)
193 | order : 300800
194 | 
195 | resolver #7
196 | domain : b.e.f.ip6.arpa
197 | options : mdns
198 | timeout : 5
199 | flags : Request A records
200 | reach : 0x00000000 (Not Reachable)
201 | order : 301000
202 | 
203 | DNS configuration (for scoped queries)
204 | 
205 | resolver #1
206 | nameserver[0] : 192.168.2.1
207 | if_index : 15 (en0)
208 | flags : Scoped, Request A records
209 | reach : 0x00020002 (Reachable,Directly Reachable Address)
210 | "#;
211 |         let ret = parse_scutil_dns(lines);
212 |         assert_eq!(ret, vec!["192.168.2.1"]);
213 |     }
214 | }
215 | 
--------------------------------------------------------------------------------
/sysconfig/src/net/linux.rs:
--------------------------------------------------------------------------------
1 | use crate::command::run_cmd;
2 | use std::fs::OpenOptions;
3 | use std::io::{Read, Write};
4 | use std::net::IpAddr;
5 | use tracing::info;
6 | 
7 | pub struct DNSSetup {
8 |     original_dns: Vec<String>,
9 |     use_resolved: bool,
10 |     dns: Vec<String>,
11 | }
12 | 
13 | const RESOLV_PATH: &str = "/etc/resolv.conf";
14 | const RESOLVED_OVERRIDE_PATH: &str = "/etc/systemd/resolved.conf.d/00-dns.conf";
15 | 
16 | impl DNSSetup {
17 |     pub fn new(dns: Vec<String>) -> Self {
18 |         if Self::is_system_using_resolved() {
19 |             info!("setup dns with systemd-resolved");
20 |             DNSSetup {
21 |                 original_dns: vec![],
22 |                 use_resolved: true,
23 |                 dns,
24 |             }
25 |         } else {
26 |             info!("setup dns with /etc/resolv.conf");
27 |             DNSSetup {
28 |                 original_dns: vec![],
29 |                 use_resolved: false,
30 |                 dns,
31 |             }
32 |         }
33 |     }
34 | 
35 |     fn set_with_dnsresolv_conf(&mut self) {
36 |         let original_dns = get_current_dns();
37 |         info!("original dns: {:?}", &original_dns);
38 |         let mut resolv = OpenOptions::new().write(true).open(RESOLV_PATH).unwrap();
39 |         resolv
40 |             .write_all(generate_resolve_file(&self.dns).as_slice())
41 |             .unwrap();
42 |         self.original_dns = original_dns;
43 |     }
44 | 
45 |     fn is_system_using_resolved() -> bool {
46 |         // check whether `/etc/resolv.conf` is a symlink
47 |         std::fs::symlink_metadata(RESOLV_PATH)
48 |             .map(|m| m.file_type().is_symlink())
49 |             .unwrap_or(false)
50 |     }
51 | 
52 |     fn set_with_systemd_resolved(&self) {
53 |         // create the `/etc/systemd/resolved.conf.d` folder if it does not exist
54 |         let _ = run_cmd("mkdir", &["-p", "/etc/systemd/resolved.conf.d"]);
55 |         // create the `/etc/systemd/resolved.conf.d/00-dns.conf` override file
56 |         let mut dns_conf = OpenOptions::new()
57 |             .write(true)
58 |             .create(true)
59 |             .truncate(true)
60 |             .open(RESOLVED_OVERRIDE_PATH)
61 |             .unwrap();
62 |         // 172.17.0.1 is the host IP in the default docker network. Set it as the second DNS server
63 |         // to resolve domains inside docker. It's required by docker containers in the default
64 |         // network and when building images.
65 |         dns_conf.write_all(b"[Resolve]\n").unwrap();
66 |         for d in &self.dns {
67 |             dns_conf
68 |                 .write_all(format!("DNS={}\n", d).as_bytes())
69 |                 .unwrap();
70 |         }
71 |         dns_conf.write_all(b"Domains=~.\n").unwrap();
72 |         // restart systemd-resolved
73 |         let _ = run_cmd("systemctl", &["restart", "systemd-resolved.service"]);
74 |     }
75 | 
76 |     pub fn original_dns(&self) -> Vec<String> {
77 |         self.original_dns.clone()
78 |     }
79 | 
80 |     pub fn start(&mut self) {
81 |         if self.use_resolved {
82 |             self.set_with_systemd_resolved();
83 |         } else {
84 |             self.set_with_dnsresolv_conf();
85 |         }
86 |     }
87 | }
88 | 
89 | impl Drop for DNSSetup {
90 |     fn drop(&mut self) {
91 |         if self.use_resolved {
92 |             info!("Restore original DNS");
93 |             let _ = run_cmd("rm", &["-f", RESOLVED_OVERRIDE_PATH]);
94 |             let _ = run_cmd("systemctl", &["restart", "systemd-resolved.service"]);
95 |         } else {
96 |             info!("Restore original DNS: {:?}", self.original_dns);
97 |             let mut resolv = OpenOptions::new()
98 |                 .write(true)
99 |                 .truncate(true)
100 |                 .open(RESOLV_PATH)
101 |                 .unwrap();
102 |             resolv
103 |                 .write_all(generate_resolve_file(&self.original_dns).as_slice())
104 |                 .unwrap();
105 |         }
106 |     }
107 | }
108 | 
109 | pub fn setup_ip(tun_name: &str, ip: &str, cidr: &str, additional_cidrs: Vec<String>) {
110 |     let _ = run_cmd("ip", &["addr", "add", ip, "dev", tun_name]);
111 |     let _ = run_cmd("ip", &["link", "set", tun_name, "up"]);
112 |     let _ = run_cmd("ip", &["route", "add", cidr, "via", ip, "dev", tun_name]);
113 |     for additional_cidr in additional_cidrs {
114 |         let _ = run_cmd(
115 |             "ip",
116 |             &[
117 |                 "route",
118 |                 "add",
119 |                 additional_cidr.as_str(),
120 |                 "via",
121 |                 ip,
122 |                 "dev",
123 |                 tun_name,
124 |             ],
125 |         );
126 |     }
127 | }
128 | 
129 | pub fn get_current_dns() -> Vec<String> {
130 |     let mut resolv = OpenOptions::new().read(true).open(RESOLV_PATH).unwrap();
131 |     let mut buf = vec![];
132 |     let _ = resolv.read_to_end(&mut buf).unwrap();
133 |     let content = std::str::from_utf8(&buf).unwrap();
134 | 
135 |     let dns_list: Vec<String> = content
136 |         .lines()
137 |         .filter(|l| l.contains("nameserver"))
138 |         .filter_map(|l| l.split_whitespace().last())
139 |         .filter_map(|ip| ip.parse::<IpAddr>().ok())
140 |         .map(|ip| ip.to_string())
141 |         .collect();
142 |     dns_list
143 | }
144 | 
145 | fn generate_resolve_file(dns: &[String]) -> Vec<u8> {
146 |     let mut content = Vec::new();
147 |     for d in dns {
148 |         if !d.is_empty() {
149 |             content.extend_from_slice(format!("nameserver {}\n", d).as_bytes());
150 |         }
151 |     }
152 |     content
153 | }
154 | 
--------------------------------------------------------------------------------
/sysconfig/src/net/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::command::run_cmd;
2 | 
3 | #[cfg(any(
4 |     target_os = "macos",
5 |     target_os = "ios",
6 |     target_os = "freebsd",
7 |     target_os = "openbsd"
8 | ))]
9 | const IP_FORWARDING_KEY: &str = "net.inet.ip.forwarding";
10 | #[cfg(target_os = "linux")]
11 | const IP_FORWARDING_KEY: &str = "net.ipv4.ip_forward";
12 | 
13 | pub struct IpForward {
14 |     original_option: usize,
15 | }
16 | 
17 | impl IpForward {
18 |     #[allow(clippy::new_without_default)]
19 |     pub fn new() -> Self {
20 |         let output = run_cmd("sysctl", &["-n", IP_FORWARDING_KEY]);
21 |         let option = output.trim().parse::<usize>().unwrap();
22 |         let _ = run_cmd("sysctl", &["-w", &format!("{}={}", IP_FORWARDING_KEY, 1)]);
23 |         IpForward {
24 |             original_option: option,
25 |         }
26 |     }
27 | }
28 | 
29 | impl Drop for IpForward {
30 |     fn drop(&mut self) {
31 |         let _ = run_cmd(
32 |             "sysctl",
33 |             &[
34 |                 "-w",
35 |                 &format!("{}={}", IP_FORWARDING_KEY, self.original_option),
36 |             ],
37 |         );
38 |     }
39 | }
40 | 
41 | #[cfg(any(target_os = "macos", target_os = "ios"))]
42 | #[path = "darwin.rs"]
43 | pub mod sys;
44 | 
45 | #[cfg(target_os = "linux")]
46 | #[path = "linux.rs"]
47 | pub mod sys;
48 | 
49 | pub use sys::{get_current_dns, setup_ip, DNSSetup};
50 | 
--------------------------------------------------------------------------------
/sysconfig/src/proc/darwin.rs:
--------------------------------------------------------------------------------
1 | #![allow(dead_code)]
2 | use super::SocketInfo;
3 | use libproc::libproc::proc_pid::{
4 |     listpidinfo, listpids, pidfdinfo, InSockInfo, ListFDs, ProcFDType, ProcType, SocketFDInfo,
5 |     SocketInfoKind,
6 | };
7 | use std::collections::HashMap;
8 | use std::io::Result;
9 | use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
10 | 
11 | pub fn list_system_proc_socks() -> Result<HashMap<i32, Vec<SocketInfo>>> {
12 |     let pids = listpids(ProcType::ProcAllPIDS, 0)?;
13 |     let mut pid_sockaddr_map = HashMap::new();
14 |     for pid in pids {
15 |         let pid = pid as i32;
16 |         pid_sockaddr_map.insert(pid, list_sockaddr(pid)?);
17 |     }
18 | 
19 |     Ok(pid_sockaddr_map)
20 | }
21 | 
22 | pub fn list_user_proc_socks(uid: u32) -> Result<HashMap<i32, Vec<SocketInfo>>> {
23 |     let pids = listpids(ProcType::ProcUIDOnly, uid)?;
24 |     let mut pid_sockaddr_map = HashMap::new();
25 |     for pid in pids {
26 |         let pid = pid as i32;
27 |         let socket_infos = list_sockaddr(pid)?;
28 |         if !socket_infos.is_empty() {
29 |             pid_sockaddr_map.insert(pid, socket_infos);
30 |         }
31 |     }
32 | 
33 |     Ok(pid_sockaddr_map)
34 | }
35 | 
36 | fn list_sockaddr(pid: i32) -> Result<Vec<SocketInfo>> {
37 |     let mut addrs = vec![];
38 |     for fd in listpidinfo::<ListFDs>(pid, 4000)? {
39 |         if let ProcFDType::Socket = fd.proc_fdtype.into() {
40 |             if let Ok(socket) = pidfdinfo::<SocketFDInfo>(pid, fd.proc_fd) {
41 |                 if let SocketInfoKind::Tcp = socket.psi.soi_kind.into() {
42 |                     // access to the member of `soi_proto` is unsafe because of the union type.
43 |                     let info = unsafe { socket.psi.soi_proto.pri_tcp };
44 |                     let local = get_local_addr(info.tcpsi_ini, socket.psi.soi_family);
45 |                     let remote = get_foreign_addr(info.tcpsi_ini, socket.psi.soi_family);
46 |                     addrs.push(SocketInfo { local, remote });
47 |                 }
48 |             }
49 |         }
50 |     }
51 | 
52 |     Ok(addrs)
53 | }
54 | 
55 | fn get_local_addr(in_sock_info: InSockInfo, family: i32) -> SocketAddr {
56 |     // change endian and cut off because insi_lport is network endian and 16bit width.
57 |     let mut port = 0;
58 |     port |= in_sock_info.insi_lport >> 8 & 0x00ff;
59 |     port |= in_sock_info.insi_lport << 8 & 0xff00;
60 | 
61 |     if family == libc::AF_INET {
62 |         // access to the member of `insi_laddr` is unsafe because of the union type.
63 |         let s_addr = unsafe { in_sock_info.insi_laddr.ina_46.i46a_addr4.s_addr };
64 | 
65 |         // s_addr is in big endian, and Ipv4Addr::from needs little endian.
66 |         let ip = Ipv4Addr::from(s_addr.swap_bytes()).into();
67 |         SocketAddr::new(ip, port as u16)
68 |     } else {
69 |         // access to the member of `insi_laddr` is unsafe because of the union type.
70 |         let s_addr = unsafe { in_sock_info.insi_laddr.ina_6.s6_addr };
71 |         let ip = Ipv6Addr::from(s_addr).into();
72 |         SocketAddr::new(ip, port as u16)
73 |     }
74 | }
75 | 
76 | fn get_foreign_addr(in_sock_info: InSockInfo, family: i32) -> SocketAddr {
77 |     // change endian and cut off because insi_fport is network endian and 16bit width.
78 |     let mut port = 0;
79 |     port |= in_sock_info.insi_fport >> 8 & 0x00ff;
80 |     port |= in_sock_info.insi_fport << 8 & 0xff00;
81 | 
82 |     if family == libc::AF_INET {
83 |         // access to the member of `insi_faddr` is unsafe because of the union type.
84 |         let s_addr = unsafe { in_sock_info.insi_faddr.ina_46.i46a_addr4.s_addr };
85 | 
86 |         // s_addr is in big endian, and Ipv4Addr::from needs little endian.
87 |         let ip = Ipv4Addr::from(s_addr.swap_bytes()).into();
88 |         SocketAddr::new(ip, port as u16)
89 |     } else {
90 |         // access to the member of `insi_faddr` is unsafe because of the union type.
91 |         let s_addr = unsafe { in_sock_info.insi_faddr.ina_6.s6_addr };
92 |         let ip = Ipv6Addr::from(s_addr).into();
93 |         SocketAddr::new(ip, port as u16)
94 |     }
95 | }
96 | 
97 | #[cfg(test)]
98 | mod test {
99 |     use super::*;
100 |     use libc;
101 | 
102 |     #[test]
103 |     fn test_list_system_proc_socks() {
104 |         assert!(list_system_proc_socks().unwrap().len() > 1);
105 |     }
106 | 
107 |     #[test]
108 |     fn test_list_user_proc_socks() {
109 |         let uid = unsafe { libc::getuid() };
110 |         let _socket = std::net::TcpListener::bind("0.0.0.0:8888").unwrap();
111 |         let s = list_user_proc_socks(uid).unwrap();
112 |         assert!(s
113 |             .values()
114 |             .find(|sockets| sockets.iter().find(|s| s.local.port() == 8888).is_some())
115 |             .is_some());
116 |     }
117 | }
118 | 
--------------------------------------------------------------------------------
/sysconfig/src/proc/linux.rs:
--------------------------------------------------------------------------------
1 | use crate::SocketInfo;
2 | use procfs::process::FDTarget;
3 | use procfs::{ProcError, ProcResult};
4 | use std::collections::HashMap;
5 | use std::io::Result;
6 | 
7 | fn to_io_error(e: ProcError) -> std::io::Error {
8 |     std::io::Error::new(std::io::ErrorKind::Other, e)
9 | }
10 | pub fn list_system_proc_socks() -> Result<HashMap<i32, Vec<SocketInfo>>> {
11 |     _list_system_proc_socks().map_err(to_io_error)
12 | }
13 | 
14 | fn _list_system_proc_socks() -> ProcResult<HashMap<i32, Vec<SocketInfo>>> {
15 |     let all_procs = procfs::process::all_processes()?;
16 | 
17 |     // build up a map between socket inodes and processes:
18 |     let mut map = HashMap::new();
19 |     for process in all_procs {
20 |         let process = process?;
21 |         let pid = process.pid();
22 |         for fd in process.fd()? {
23 |             if let FDTarget::Socket(inode) = fd?.target {
24 |                 map.insert(inode, pid);
25 |             }
26 |         }
27 |     }
28 | 
29 |     let mut socks_map = HashMap::new();
30 |     // get the tcp table
31 |     let tcp = procfs::net::tcp().unwrap();
32 |     let tcp6 = procfs::net::tcp6().unwrap();
33 |     for entry in tcp.into_iter().chain(tcp6) {
34 |         // find the process (if any) that has an open FD to this entry's inode
35 |         if let Some(pid) = map.get(&entry.inode) {
36 |             let item = socks_map.entry(*pid).or_insert(vec![]);
37 |             item.push(SocketInfo {
38 |                 local: entry.local_address,
39 |                 remote: entry.remote_address,
40 |             });
41 |         }
42 |     }
43 |     Ok(socks_map)
44 | }
45 | 
46 | pub fn list_user_proc_socks(expected_uid: u32) -> Result<HashMap<i32, Vec<SocketInfo>>> {
47 |     _list_user_proc_socks(expected_uid).map_err(to_io_error)
48 | }
49 | 
50 | fn _list_user_proc_socks(expected_uid: u32) -> ProcResult<HashMap<i32, Vec<SocketInfo>>> {
51 |     let all_procs = procfs::process::all_processes()?;
52 | 
53 |     // build up a map between socket inodes and processes:
54 |     let mut map = HashMap::new();
55 |     for process in all_procs {
56 |         let p = process?;
57 |         let pid = p.pid();
58 |         if expected_uid != p.uid()? {
59 |             continue;
60 |         }
61 | 
62 |         for fd in p.fd()? {
63 |             if let FDTarget::Socket(inode) = fd?.target {
64 |                 map.insert(inode, pid);
65 |             }
66 |         }
67 |     }
68 |     let mut socks_map = HashMap::new();
69 |     // get the tcp table
70 |     let tcp = procfs::net::tcp().unwrap();
71 |     let tcp6 = procfs::net::tcp6().unwrap();
72 |     for entry in tcp.into_iter().chain(tcp6) {
73 |         // find the process (if any) that has an open FD to this entry's inode
74 |         if let Some(pid) = map.get(&entry.inode) {
75 |             let item = socks_map.entry(*pid).or_insert(vec![]);
76 |             item.push(SocketInfo {
77 |                 local: entry.local_address,
78 |                 remote: entry.remote_address,
79 |             });
80 |         }
81 |     }
82 |     Ok(socks_map)
83 | }
84 | 
85 | #[cfg(test)]
86 | mod test {
87 |     use super::*;
88 |     use libc;
89 | 
90 |     #[test]
91 |     fn test_list_user_proc_socks() {
92 |         let uid = unsafe { libc::getuid() };
93 |         let _socket = std::net::TcpListener::bind("0.0.0.0:65532").unwrap();
94 |         let s = list_user_proc_socks(uid).unwrap();
95 |         assert!(s
96 |             .values()
97 |             .any(|sockets| sockets.iter().any(|s| s.local.port() == 65532)));
98 |     }
99 | }
100 | 
--------------------------------------------------------------------------------
/sysconfig/src/proc/mod.rs:
--------------------------------------------------------------------------------
1 | use std::net::SocketAddr;
2 | 
3 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
4 | pub struct SocketInfo {
5 |     pub local: SocketAddr,
6 |     pub remote: SocketAddr,
7 | }
8 | 
9 | #[cfg(target_os = "macos")]
10 | #[path = "darwin.rs"]
11 | pub mod sys;
12 | 
13 | #[cfg(target_os = "linux")]
14 | #[path = "linux.rs"]
15 | pub mod sys;
16 | 
--------------------------------------------------------------------------------
/sysconfig/src/ulimit.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 | 
3 | pub fn set_rlimit_no_file(no: u64) -> io::Result<()> {
4 |     // #[cfg(target_pointer_width = "32")]
5 |     // let rlim = libc::rlimit {
6 |     //     rlim_cur: no as u32,
7 |     //     rlim_max: no as u32,
8 |     // };
9 |     // #[cfg(target_pointer_width = "64")]
10 |     let rlim = libc::rlimit {
11 |         rlim_cur: no,
12 |         rlim_max: no,
13 |     };
14 |     let ret = unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) };
15 |     if ret == -1 {
16 |         return Err(io::Error::last_os_error());
17 |     }
18 |     Ok(())
19 | }
20 | 
21 | pub fn get_rlimit_no_file() -> io::Result<libc::rlimit> {
22 |     let mut rlim = libc::rlimit {
23 |         rlim_cur: 0,
24 |         rlim_max: 0,
25 |     };
26 |     let ret = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) };
27 |     if ret == -1 {
28 |         return Err(io::Error::last_os_error());
29 |     }
30 |     Ok(rlim)
31 | }
32 | 
--------------------------------------------------------------------------------
/tcp_connection/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "tcp_connection"
3 | version = "20250331.0.0"
4 | edition = "2021"
5 | 
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 | 
8 | [dependencies]
9 | dyn-clone = { workspace = true }
10 | async-std = { workspace = true }
11 | nanorand = { workspace = true }
12 | memchr = { workspace = true }
13 | serde = { workspace = true, features = ["derive"] }
14 | base64 = { workspace = true }
15 | 
16 | [dev-dependencies]
17 | async-std = { workspace = true, features = ["attributes"] }
18 | 
19 | [target.'cfg(target_arch = "x86_64")'.dev-dependencies]
20 | testcontainers = { workspace = true }
21 | 
--------------------------------------------------------------------------------
/tcp_connection/src/lib.rs:
--------------------------------------------------------------------------------
1 | mod obfs_http;
2 | mod obfs_tls;
3 | 
4 | use async_std::{
5 |     io::{Read, Write},
6 |     net::TcpStream,
7 | };
8 | use dyn_clone::DynClone;
9 | 
10 | use obfs_http::ObfsHttpTcpStream;
11 | use obfs_tls::ObfsTlsTcpStream;
12 | use serde::Deserialize;
13 | 
14 | use std::{
15 |     fmt::Debug,
16 |     io::{IoSlice, IoSliceMut, Result},
17 |     net::SocketAddr,
18 |     pin::Pin,
19 | };
20 | 
21 | pub trait Connection: Read + Write + Unpin + Send + Sync + DynClone {}
22 | 
23 | dyn_clone::clone_trait_object!(Connection);
24 | 
25 | /// Combined async reader and writer, `futures 0.3` version.
26 | /// Note that this struct is only present in `readwrite` if "asyncstd" Cargo feature is enabled.
27 | #[derive(Clone)]
28 | pub struct TcpConnection {
29 |     inner: Box<dyn Connection>,
30 | }
31 | 
32 | #[derive(Clone, Copy, PartialEq, Eq, Debug, Deserialize)]
33 | pub enum ObfsMode {
34 |     Http,
35 |     Tls,
36 | }
37 | 
38 | impl Connection for TcpStream {}
39 | 
40 | impl TcpConnection {
41 |     pub async fn connect_obfs(
42 |         addr: SocketAddr,
43 |         host: String,
44 |         mode: ObfsMode,
45 |     ) -> std::io::Result<Self> {
46 |         let conn = match mode {
47 |             ObfsMode::Http => {
48 |                 Box::new(ObfsHttpTcpStream::connect(addr, host).await?) as Box<dyn Connection>
49 |             }
50 |             ObfsMode::Tls => {
51 |                 Box::new(ObfsTlsTcpStream::connect(addr, host).await?) as Box<dyn Connection>
52 |             }
53 |         };
54 | 
55 |         Ok(TcpConnection { inner: conn })
56 |     }
57 | 
58 |     pub async fn connect_tcp(addr: SocketAddr) -> std::io::Result<Self> {
59 |         let conn = Box::new(TcpStream::connect(addr).await?);
60 | 
61 |         Ok(TcpConnection { inner: conn })
62 |     }
63 | 
64 |     pub fn new(conn: TcpStream) -> Self {
65 |         TcpConnection {
66 |             inner: Box::new(conn),
67 |         }
68 |     }
69 | }
70 | 
71 | impl Read for TcpConnection {
72 |     fn poll_read(
73 |         mut self: Pin<&mut Self>,
74 |         cx: &mut std::task::Context<'_>,
75 |         buf: &mut [u8],
76 |     ) -> std::task::Poll<Result<usize>> {
77 |         Pin::new(&mut self.inner).poll_read(cx, buf)
78 |     }
79 | 
80 |     fn poll_read_vectored(
81 |         mut self: Pin<&mut Self>,
82 |         cx: &mut std::task::Context<'_>,
83 |         bufs: &mut [IoSliceMut<'_>],
84 |     ) -> std::task::Poll<Result<usize>> {
85 |         Pin::new(&mut self.inner).poll_read_vectored(cx, bufs)
86 |     }
87 | }
88 | 
89 | impl Write for TcpConnection {
90 |     fn poll_write_vectored(
91 |         mut self: Pin<&mut Self>,
92 |         cx: &mut std::task::Context<'_>,
93 |         bufs: &[IoSlice<'_>],
94 |     ) -> std::task::Poll<Result<usize>> {
95 |         Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)
96 |     }
97 | 
98 |     fn poll_write(
99 |         mut self: Pin<&mut Self>,
100 |         cx: &mut std::task::Context<'_>,
101 |         buf: &[u8],
102 |     ) -> std::task::Poll<Result<usize>> {
103 |         Pin::new(&mut self.inner).poll_write(cx, buf)
104 |     }
105 | 
106 |     fn poll_flush(
107 |         mut self: Pin<&mut Self>,
108 |         cx: &mut std::task::Context<'_>,
109 |     ) -> std::task::Poll<Result<()>> {
110 |         Pin::new(&mut self.inner).poll_flush(cx)
111 |     }
112 | 
113 |     fn poll_close(
114 |         mut self: Pin<&mut Self>,
115 |         cx: &mut std::task::Context<'_>,
116 |     ) -> std::task::Poll<Result<()>> {
117 |         Pin::new(&mut self.inner).poll_close(cx)
118 |     }
119 | }
120 | 
121 | /// Run a simple-obfs server in a docker container.
122 | /// The server listens on port 8388 and forwards all traffic to
123 | /// 127.0.0.1:12345
124 | /// The server will be stopped when the returned container is dropped.
125 | #[cfg(test)]
126 | #[cfg(all(target_arch = "x86_64", target_env = "gnu"))]
127 | fn run_obfs_server(
128 |     mode: &str,
129 |     server_port: usize,
130 |     forward_port: usize,
131 | ) -> testcontainers::Container<testcontainers::GenericImage> {
132 |     use testcontainers::core::WaitFor;
133 |     use testcontainers::runners::SyncRunner;
134 |     use testcontainers::{GenericImage, ImageExt};
135 | 
136 |     let wait_for = WaitFor::message_on_stderr(format!("listening at 0.0.0.0:{server_port}"));
137 |     GenericImage::new("gists/simple-obfs", "latest")
138 |         .with_wait_for(wait_for)
139 |         .with_env_var("FORWARD", format!("127.0.0.1:{forward_port}"))
140 |         .with_env_var("SERVER_PORT", server_port.to_string())
141 |         .with_env_var("OBFS_OPTS", mode)
142 |         .with_network("host")
143 |         .start()
144 |         .unwrap()
145 | }
146 | 
--------------------------------------------------------------------------------
/tun_nat/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "tun_nat"
3 | version = "20250331.0.0"
4 | authors = ["gfreezy "]
5 | edition = "2021"
6 | 
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 | 
9 | [dependencies]
10 | libc = { workspace = true }
11 | sysconfig = { path = "../sysconfig" }
12 | parking_lot = { workspace = true }
13 | bitvec = { workspace = true }
14 | smoltcp = { workspace = true, features = ["proto-ipv6", "proto-ipv4", "std"] }
15 | tracing = { workspace = true }
16 | crossbeam-channel = "0.5.8"
17 | object-pool = "0.6.0"
--------------------------------------------------------------------------------
/tun_nat/examples/echo.rs:
--------------------------------------------------------------------------------
1 | use std::io::{Read, Write};
2 | use std::net::{TcpListener, UdpSocket};
3 | use std::thread;
4 | 
5 | fn main() {
6 |     thread::spawn(move || {
7 |         let udp_socket = UdpSocket::bind("0.0.0.0:1300").unwrap();
8 |         let mut buf = vec![0; 1500];
9 |         loop {
10 |             let (size, addr) = udp_socket.recv_from(&mut buf).unwrap();
11 |             println!("recv {} bytes from {}", size, &addr);
12 |             udp_socket.send_to(&buf[..size], addr).unwrap();
13 |         }
14 |     });
15 | 
16 |     let listener = TcpListener::bind("0.0.0.0:1300").unwrap();
17 |     while let Ok((mut conn, addr)) = listener.accept() {
18 |         thread::spawn(move || {
19 |             let mut buf = vec![0; 1500];
20 |             let size = conn.read(&mut buf).unwrap();
21 |             println!("recv {} bytes from {}", size, &addr);
22 |             conn.write_all(&buf[..size]).unwrap();
23 |         });
24 |     }
25 | }
26 | 
--------------------------------------------------------------------------------
/tun_nat/src/tun_socket/mod.rs:
--------------------------------------------------------------------------------
1 | #[cfg(any(target_os = "macos", target_os = "ios"))]
2 | #[path = "tun_darwin.rs"]
3 | pub mod tun;
4 | 
5 | #[cfg(target_os = "linux")]
6 | #[path = "tun_linux.rs"]
7 | pub mod tun;
8 | 
9 | pub use self::tun::TunSocket;
10 | 
--------------------------------------------------------------------------------
/tun_nat/src/tun_socket/tun_linux.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2019 Cloudflare, Inc. All rights reserved.
2 | // SPDX-License-Identifier: BSD-3-Clause
3 | #![allow(dead_code)]
4 | use libc::*;
5 | use std::io::{Error, ErrorKind, Read, Result, Write};
6 | use std::os::unix::io::{AsRawFd, RawFd};
7 | use std::sync::Arc;
8 | const TUNSETIFF: u64 = 0x4004_54ca;
9 | 
10 | #[repr(C)]
11 | union IfrIfru {
12 |     ifru_addr: sockaddr,
13 |     ifru_addr_v4: sockaddr_in,
14 |     ifru_addr_v6: sockaddr_in,
15 |     ifru_dstaddr: sockaddr,
16 |     ifru_broadaddr: sockaddr,
17 |     ifru_flags: c_short,
18 |     ifru_metric: c_int,
19 |     ifru_mtu: c_int,
20 |     ifru_phys: c_int,
21 |     ifru_media: c_int,
22 |     ifru_intval: c_int,
23 |     //ifru_data: caddr_t,
24 |     //ifru_devmtu: ifdevmtu,
25 |     //ifru_kpi: ifkpi,
26 |     ifru_wake_flags: u32,
27 |     ifru_route_refcnt: u32,
28 |     ifru_cap: [c_int; 2],
29 |     ifru_functional_type: u32,
30 | }
31 | 
32 | #[repr(C)]
33 | pub struct ifreq {
34 |     ifr_name: [c_uchar; IFNAMSIZ],
35 |     ifr_ifru: IfrIfru,
36 | }
37 | 
38 | #[derive(Default, Debug, Clone)]
39 | pub struct TunSocket {
40 |     fd: Arc<RawFd>,
41 |     name: String,
42 | }
43 | 
44 | impl Drop for TunSocket {
45 |     fn drop(&mut self) {
46 |         if Arc::strong_count(&self.fd) == 1 {
47 |             unsafe { close(*self.fd) };
48 |         }
49 |     }
50 | }
51 | 
52 | impl AsRawFd for TunSocket {
53 |     fn as_raw_fd(&self) -> RawFd {
54 |         *self.fd
55 |     }
56 | }
57 | 
58 | impl TunSocket {
59 |     pub fn new(name: &str) -> Result<TunSocket> {
60 |         let fd = match unsafe { open(b"/dev/net/tun\0".as_ptr() as _, O_RDWR) } {
61 |             -1 => return Err(Error::last_os_error()),
62 |             fd => fd,
63 |         };
64 | 
65 |         let iface_name = name.as_bytes();
66 |         let mut ifr = ifreq {
67 |             ifr_name: [0; IFNAMSIZ],
68 |             ifr_ifru: IfrIfru {
69 |                 ifru_flags: (IFF_TUN | IFF_NO_PI | IFF_MULTI_QUEUE) as _,
70 |             },
71 |         };
72 | 
73 |         if iface_name.len() >= ifr.ifr_name.len() {
74 |             return Err(Error::new(ErrorKind::Other, "Invalid tun name"));
75 |         }
76 | 
77 |         ifr.ifr_name[..iface_name.len()].copy_from_slice(iface_name);
78 | 
79 |         if unsafe { ioctl(fd, TUNSETIFF as _, &ifr) } < 0 {
80 |             return Err(Error::last_os_error());
81 |         }
82 | 
83 |         let name = name.to_string();
84 | 
85 |         Ok(TunSocket {
86 |             fd: Arc::new(fd),
87 |             name,
88 |         })
89 |     }
90 | 
91 |     pub fn new_queue(&self) -> Result<TunSocket> {
92 |         let tun = TunSocket::new(&self.name)?;
93 |         Ok(tun)
94 |     }
95 | 
96 |     pub fn name(&self) -> Result<String> {
97 |         Ok(self.name.clone())
98 |     }
99 | 
100 |     pub fn set_non_blocking(self) -> Result<TunSocket> {
101 |         match unsafe { fcntl(*self.fd, F_GETFL) } {
102 |             -1 => Err(Error::last_os_error()),
103 |             flags => match unsafe { fcntl(*self.fd, F_SETFL, flags | O_NONBLOCK) } {
104 |                 -1 => Err(Error::last_os_error()),
105 |                 _ => Ok(self),
106 |             },
107 |         }
108 |     }
109 | 
110 |     /// Get the current MTU value
111 |     pub fn mtu(&self) -> Result<usize> {
112 |         let fd = match unsafe { socket(AF_INET, SOCK_STREAM, IPPROTO_IP) } {
113 |             -1 => return Err(Error::last_os_error()),
114 |             fd => fd,
115 |         };
116 | 
117 |         let name = self.name()?;
118 |         let iface_name: &[u8] = name.as_ref();
119 |         let mut ifr = ifreq {
120 |             ifr_name: [0; IF_NAMESIZE],
121 |             ifr_ifru: IfrIfru { ifru_mtu: 0 },
122 |         };
123 | 
124 |         ifr.ifr_name[..iface_name.len()].copy_from_slice(iface_name);
125 | 
126 |         if unsafe { ioctl(fd, SIOCGIFMTU as _, &ifr) } < 0 {
127 |             return Err(Error::last_os_error());
128 |         }
129 | 
130 |         unsafe { close(fd) };
131 | 
132 |         Ok(unsafe { ifr.ifr_ifru.ifru_mtu } as _)
133 |     }
134 | }
135 | 
136 | impl Read for TunSocket {
137 |     fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
138 |         match unsafe { read(*self.fd, buf.as_mut_ptr() as _, buf.len()) } {
139 |             -1 => Err(Error::last_os_error()),
140 |             n => Ok(n as usize),
141 |         }
142 |     }
143 | }
144 | 
145 | impl Write for TunSocket {
146 |     fn write(&mut self, buf: &[u8]) -> Result<usize> {
147 |         match unsafe { write(*self.fd, buf.as_ptr() as _, buf.len() as _) } {
148 |             -1 => Ok(0),
149 |             n => Ok(n as usize),
150 |         }
151 |     }
152 | 
153 |     fn flush(&mut self) -> Result<()> {
154 |         Ok(())
155 |     }
156 | }
157 | 
158 | impl Read for &TunSocket {
159 |     fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
160 |         match unsafe { read(*self.fd, buf.as_mut_ptr() as _, buf.len()) } {
161 |             -1 => Err(Error::last_os_error()),
162 |             n => Ok(n as usize),
163 |         }
164 |     }
165 | }
166 | 
167 | impl Write for &TunSocket {
168 |     fn write(&mut self, buf: &[u8]) -> Result<usize> {
169 |         match unsafe { write(*self.fd, buf.as_ptr() as _, buf.len() as _) } {
170 |             -1 => Ok(0),
171 |             n => Ok(n as usize),
172 |         }
173 |     }
174 | 
175 |     fn flush(&mut self) -> Result<()> {
176 |         Ok(())
177 |     }
178 | }
179 | 
--------------------------------------------------------------------------------