├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── build.yml
├── .gitignore
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── benches
│   ├── certificate_authorities.rs
│   ├── decoder.rs
│   └── proxy.rs
├── examples
│   ├── ca
│   │   ├── hudsucker.cer
│   │   └── hudsucker.key
│   ├── log.rs
│   ├── noop.rs
│   └── openssl.rs
├── rustfmt.toml
├── src
│   ├── body.rs
│   ├── certificate_authority
│   │   ├── mod.rs
│   │   ├── openssl_authority.rs
│   │   └── rcgen_authority.rs
│   ├── decoder.rs
│   ├── error.rs
│   ├── lib.rs
│   ├── noop.rs
│   ├── proxy
│   │   ├── builder.rs
│   │   ├── internal.rs
│   │   └── mod.rs
│   └── rewind.rs
└── tests
    ├── common
    │   └── mod.rs
    ├── openssl_ca.rs
    ├── rcgen_ca.rs
    └── websocket.rs
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: cargo
4 | directory: "/"
5 | schedule:
6 | interval: daily
7 | time: "07:00"
8 | timezone: Australia/Sydney
9 | open-pull-requests-limit: 99
10 | reviewers:
11 | - omjadas
12 | groups:
13 | rustls:
14 | patterns:
15 | - "hyper-rustls"
16 | - "tokio-rustls"
17 | tungstenite:
18 | patterns:
19 | - "hyper-tungstenite"
20 | - "tokio-tungstenite"
21 | - package-ecosystem: github-actions
22 | directory: "/"
23 | schedule:
24 | interval: daily
25 | time: "07:00"
26 | timezone: Australia/Sydney
27 | open-pull-requests-limit: 10
28 | reviewers:
29 | - omjadas
30 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | branches:
4 | - main
5 | pull_request:
6 | release:
7 | types:
8 | - released
9 |
10 | name: Build
11 |
12 | permissions:
13 | checks: write
14 | contents: read
15 |
16 | jobs:
17 | test:
18 | runs-on: ubuntu-latest
19 |
20 | strategy:
21 | fail-fast: false
22 | matrix:
23 | toolchain:
24 | - stable
25 | - beta
26 | - nightly
27 | - "1.85.0"
28 |
29 | steps:
30 | - uses: actions/checkout@v4
31 | - uses: dtolnay/rust-toolchain@master
32 | with:
33 | toolchain: ${{ matrix.toolchain }}
34 | - uses: Swatinem/rust-cache@v2.7.8
35 | - run: cargo build --release --all-targets --all-features
36 | - run: cargo test --release --all-features
37 |
38 | minimal-versions:
39 | runs-on: ubuntu-latest
40 |
41 | steps:
42 | - uses: actions/checkout@v4
43 | - uses: dtolnay/rust-toolchain@nightly
44 | - uses: Swatinem/rust-cache@v2.7.8
45 | - run: cargo update -Z direct-minimal-versions
46 | - run: cargo build --release --all-targets --all-features
47 |
48 | clippy:
49 | runs-on: ubuntu-latest
50 |
51 | steps:
52 | - uses: actions/checkout@v4
53 | - uses: dtolnay/rust-toolchain@stable
54 | with:
55 | components: clippy
56 | - uses: Swatinem/rust-cache@v2.7.8
57 | - run: cargo clippy --all-targets --all-features
58 |
59 | fmt:
60 | runs-on: ubuntu-latest
61 |
62 | steps:
63 | - uses: actions/checkout@v4
64 | - uses: dtolnay/rust-toolchain@nightly
65 | with:
66 | components: rustfmt
67 | - run: cargo fmt --all -- --check
68 |
69 | publish:
70 | if: github.event_name == 'release'
71 | runs-on: ubuntu-latest
72 | needs:
73 | - test
74 | - minimal-versions
75 | - clippy
76 | - fmt
77 |
78 | steps:
79 | - uses: actions/checkout@v4
80 | - uses: dtolnay/rust-toolchain@stable
81 | - run: cargo publish
82 | env:
83 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
84 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ### Linux ###
2 | *~
3 |
4 | # temporary files which can be created if a process still has a handle open of a deleted file
5 | .fuse_hidden*
6 |
7 | # KDE directory preferences
8 | .directory
9 |
10 | # Linux trash folder which might appear on any partition or disk
11 | .Trash-*
12 |
13 | # .nfs files are created when an open file is removed but is still being accessed
14 | .nfs*
15 |
16 | ### macOS ###
17 | # General
18 | .DS_Store
19 | .AppleDouble
20 | .LSOverride
21 |
22 | # Icon must end with two \r
23 | Icon
24 |
25 |
26 | # Thumbnails
27 | ._*
28 |
29 | # Files that might appear in the root of a volume
30 | .DocumentRevisions-V100
31 | .fseventsd
32 | .Spotlight-V100
33 | .TemporaryItems
34 | .Trashes
35 | .VolumeIcon.icns
36 | .com.apple.timemachine.donotpresent
37 |
38 | # Directories potentially created on remote AFP share
39 | .AppleDB
40 | .AppleDesktop
41 | Network Trash Folder
42 | Temporary Items
43 | .apdisk
44 |
45 | ### Rust ###
46 | # Generated by Cargo
47 | # will have compiled files and executables
48 | debug/
49 | target/
50 |
51 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
52 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
53 | Cargo.lock
54 |
55 | # These are backup files generated by rustfmt
56 | **/*.rs.bk
57 |
58 | # MSVC Windows builds of rustc generate these, which store debugging information
59 | *.pdb
60 |
61 | ### Vim ###
62 | # Swap
63 | [._]*.s[a-v][a-z]
64 | !*.svg # comment out if you don't need vector files
65 | [._]*.sw[a-p]
66 | [._]s[a-rt-v][a-z]
67 | [._]ss[a-gi-z]
68 | [._]sw[a-p]
69 |
70 | # Session
71 | Session.vim
72 | Sessionx.vim
73 |
74 | # Temporary
75 | .netrwhist
76 | # Auto-generated tag files
77 | tags
78 | # Persistent undo
79 | [._]*.un~
80 |
81 | ### VisualStudioCode ###
82 | .vscode/*
83 |
84 | # Local History for Visual Studio Code
85 | .history/
86 |
87 | ### VisualStudioCode Patch ###
88 | # Ignore all local history of files
89 | .history
90 | .ionide
91 |
92 | ### Windows ###
93 | # Windows thumbnail cache files
94 | Thumbs.db
95 | Thumbs.db:encryptable
96 | ehthumbs.db
97 | ehthumbs_vista.db
98 |
99 | # Dump file
100 | *.stackdump
101 |
102 | # Folder config file
103 | [Dd]esktop.ini
104 |
105 | # Recycle Bin used on file shares
106 | $RECYCLE.BIN/
107 |
108 | # Windows Installer files
109 | *.cab
110 | *.msi
111 | *.msix
112 | *.msm
113 | *.msp
114 |
115 | # Windows shortcuts
116 | *.lnk
117 |
118 | # JetBrains
119 | .idea/
120 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "hudsucker"
3 | version = "0.23.0"
4 | edition = "2024"
5 | rust-version = "1.85.0"
6 | description = "MITM HTTP/S proxy"
7 | documentation = "https://docs.rs/hudsucker"
8 | readme = "README.md"
9 | homepage = "https://github.com/omjadas/hudsucker"
10 | repository = "https://github.com/omjadas/hudsucker"
11 | license = "MIT OR Apache-2.0"
12 | keywords = ["http", "proxy"]
13 | categories = ["network-programming"]
14 | exclude = [".github/"]
15 |
16 | [package.metadata.docs.rs]
17 | all-features = true
18 | rustdoc-args = ["--cfg", "docsrs"]
19 |
20 | [dependencies]
21 | async-compression = { version = "0.4.0", features = ["tokio", "brotli", "gzip", "zlib", "zstd"], optional = true }
22 | bstr = "1.0.0"
23 | futures = "0.3.11"
24 | http = "1.1.0"
25 | http-body-util = "0.1.0"
26 | hyper = "1.1.0"
27 | hyper-rustls = { version = "0.27.0", default-features = false, features = ["http1", "logging", "tls12", "webpki-tokio"], optional = true }
28 | hyper-tls = { version = "0.6.0", optional = true }
29 | hyper-tungstenite = "0.17.0"
30 | hyper-util = { version="0.1.3", features = ["client-legacy", "server", "http1"] }
31 | moka = { version = "0.12.0", features = ["future"], optional = true }
32 | openssl = { version = "0.10.46", optional = true }
33 | rand = { version = "0.9.0", optional = true }
34 | rcgen = { version = "0.13.0", features = ["x509-parser"], optional = true }
35 | thiserror = "2.0.7"
36 | time = { version = "0.3.35", optional = true }
37 | tokio = { version = "1.24.2", features = ["macros", "rt"] }
38 | tokio-graceful = "0.2.0"
39 | tokio-native-tls = { version = "0.3.1", optional = true }
40 | tokio-rustls = { version = "0.26.0", features = ["logging", "tls12"] }
41 | tokio-tungstenite = "0.26.1"
42 | tokio-util = { version = "0.7.1", features = ["io"], optional = true }
43 | tracing = { version = "0.1.35", features = ["log"] }
44 |
45 | [dev-dependencies]
46 | async-http-proxy = { version = "1.2.5", features = ["runtime-tokio"] }
47 | criterion = { version = "0.6.0", features = ["async_tokio"] }
48 | reqwest = "0.12.0"
49 | rustls-native-certs = "0.8.0"
50 | rustls-pemfile = "2.0.0"
51 | tokio = { version = "1.24.2", features = ["full"] }
52 | tokio-native-tls = "0.3.1"
53 | tracing-subscriber = "0.3.8"
54 | x509-parser = "0.17.0"
55 |
56 | [features]
57 | decoder = ["dep:async-compression", "dep:tokio-util", "tokio/io-util"]
58 | default = ["decoder", "rcgen-ca", "rustls-client"]
59 | full = ["decoder", "http2", "native-tls-client", "openssl-ca", "rcgen-ca", "rustls-client"]
60 | http2 = ["hyper-util/http2", "hyper-rustls?/http2"]
61 | native-tls-client = ["dep:hyper-tls", "dep:tokio-native-tls", "tokio-tungstenite/native-tls"]
62 | openssl-ca = ["dep:openssl", "dep:moka"]
63 | rcgen-ca = ["dep:rcgen", "dep:moka", "dep:time", "dep:rand"]
64 | rustls-client = ["dep:hyper-rustls", "tokio-tungstenite/rustls-tls-webpki-roots"]
65 |
66 | [[example]]
67 | name = "log"
68 | required-features = ["rcgen-ca", "rustls-client"]
69 |
70 | [[example]]
71 | name = "noop"
72 | required-features = ["rcgen-ca", "rustls-client"]
73 |
74 | [[example]]
75 | name = "openssl"
76 | required-features = ["openssl-ca", "rustls-client"]
77 |
78 | [[test]]
79 | name = "openssl_ca"
80 | required-features = ["decoder", "openssl-ca", "native-tls-client", "rustls-client"]
81 |
82 | [[test]]
83 | name = "rcgen_ca"
84 | required-features = ["decoder", "rcgen-ca", "native-tls-client", "rustls-client"]
85 |
86 | [[test]]
87 | name = "websocket"
88 | required-features = ["decoder", "rcgen-ca", "native-tls-client", "rustls-client"]
89 |
90 | [[bench]]
91 | name = "certificate_authorities"
92 | harness = false
93 | required-features = ["openssl-ca", "rcgen-ca"]
94 |
95 | [[bench]]
96 | name = "decoder"
97 | harness = false
98 | required-features = ["decoder"]
99 |
100 | [[bench]]
101 | name = "proxy"
102 | harness = false
103 | required-features = ["rcgen-ca", "rustls-client"]
104 |
105 | [profile.bench]
106 | lto = true
107 | debug = true
108 | codegen-units = 1
109 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Omja Das
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # hudsucker
2 |
3 | [crates.io](https://crates.io/crates/hudsucker)
4 | [docs.rs](https://docs.rs/hudsucker)
5 | [build status](https://github.com/omjadas/hudsucker/actions/workflows/build.yml)
6 |
7 | Hudsucker is a MITM HTTP/S proxy written in Rust that allows you to:
8 |
9 | - Modify HTTP/S requests
10 | - Modify HTTP/S responses
11 | - Modify WebSocket messages
12 |
13 | ## Features
14 |
15 | - `decoder`: Enables `decode_request` and `decode_response` helpers (enabled by default).
16 | - `full`: Enables all features.
17 | - `http2`: Enables HTTP/2 support.
18 | - `native-tls-client`: Enables `ProxyBuilder::with_native_tls_client`.
19 | - `openssl-ca`: Enables `certificate_authority::OpensslAuthority`.
20 | - `rcgen-ca`: Enables `certificate_authority::RcgenAuthority` (enabled by default).
21 | - `rustls-client`: Enables `ProxyBuilder::with_rustls_client` (enabled by default).
22 |
23 | ## Usage
24 |
25 | For usage, refer to the [provided examples](https://github.com/omjadas/hudsucker/tree/main/examples).
26 |
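The snippet below is a condensed sketch of `examples/log.rs`: it starts a proxy on `localhost:3000` that signs leaf certificates with the bundled `rcgen` CA and logs every request, response, and WebSocket message it forwards (requires the default `rcgen-ca` and `rustls-client` features).

```rust
use hudsucker::{
    certificate_authority::RcgenAuthority,
    hyper::{Request, Response},
    rcgen::{CertificateParams, KeyPair},
    rustls::crypto::aws_lc_rs,
    tokio_tungstenite::tungstenite::Message,
    *,
};
use std::net::SocketAddr;

// Handlers can modify traffic in flight; this one only logs it.
#[derive(Clone)]
struct LogHandler;

impl HttpHandler for LogHandler {
    async fn handle_request(
        &mut self,
        _ctx: &HttpContext,
        req: Request<Body>,
    ) -> RequestOrResponse {
        println!("{:?}", req);
        req.into()
    }

    async fn handle_response(&mut self, _ctx: &HttpContext, res: Response<Body>) -> Response<Body> {
        println!("{:?}", res);
        res
    }
}

impl WebSocketHandler for LogHandler {
    async fn handle_message(&mut self, _ctx: &WebSocketContext, msg: Message) -> Option<Message> {
        println!("{:?}", msg);
        Some(msg)
    }
}

#[tokio::main]
async fn main() {
    // Parse the CA key pair and certificate used to sign per-host certificates.
    let key_pair = KeyPair::from_pem(include_str!("ca/hudsucker.key")).expect("Failed to parse private key");
    let ca_cert = CertificateParams::from_ca_cert_pem(include_str!("ca/hudsucker.cer"))
        .expect("Failed to parse CA certificate")
        .self_signed(&key_pair)
        .expect("Failed to sign CA certificate");
    let ca = RcgenAuthority::new(key_pair, ca_cert, 1_000, aws_lc_rs::default_provider());

    let proxy = Proxy::builder()
        .with_addr(SocketAddr::from(([127, 0, 0, 1], 3000)))
        .with_ca(ca)
        .with_rustls_client(aws_lc_rs::default_provider())
        .with_http_handler(LogHandler)
        .with_websocket_handler(LogHandler)
        .with_graceful_shutdown(async {
            tokio::signal::ctrl_c()
                .await
                .expect("Failed to install CTRL+C signal handler");
        })
        .build()
        .expect("Failed to create proxy");

    if let Err(e) = proxy.start().await {
        eprintln!("{}", e);
    }
}
```

Clients pointed at the proxy must trust `examples/ca/hudsucker.cer` (or ignore certificate errors) for HTTPS interception to work.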
27 | ### Built With Hudsucker
28 |
29 | - [Cruster](https://github.com/sinKettu/cruster)
30 |
31 | ## License
32 |
33 | Licensed under either of
34 |
35 | - [Apache License, Version 2.0](LICENSE-APACHE)
36 | - [MIT license](LICENSE-MIT)
37 |
38 | at your option.
39 |
40 | ## Contribution
41 |
42 | Unless you explicitly state otherwise, any contribution intentionally submitted
43 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
44 | dual licensed as above, without any additional terms or conditions.
45 |
--------------------------------------------------------------------------------
/benches/certificate_authorities.rs:
--------------------------------------------------------------------------------
1 | use criterion::{Criterion, black_box, criterion_group, criterion_main};
2 | use http::uri::Authority;
3 | use hudsucker::{
4 | certificate_authority::{CertificateAuthority, OpensslAuthority, RcgenAuthority},
5 | openssl::{hash::MessageDigest, pkey::PKey, x509::X509},
6 | rcgen::{CertificateParams, KeyPair},
7 | rustls::crypto::aws_lc_rs,
8 | };
9 |
10 | fn runtime() -> tokio::runtime::Runtime {
11 | tokio::runtime::Builder::new_current_thread()
12 | .build()
13 | .unwrap()
14 | }
15 |
16 | fn build_rcgen_ca(cache_size: u64) -> RcgenAuthority {
17 | let key_pair = include_str!("../examples/ca/hudsucker.key");
18 | let ca_cert = include_str!("../examples/ca/hudsucker.cer");
19 | let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
20 | let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
21 | .expect("Failed to parse CA certificate")
22 | .self_signed(&key_pair)
23 | .expect("Failed to sign CA certificate");
24 |
25 | RcgenAuthority::new(key_pair, ca_cert, cache_size, aws_lc_rs::default_provider())
26 | }
27 |
28 | fn build_openssl_ca(cache_size: u64) -> OpensslAuthority {
29 | let private_key: &[u8] = include_bytes!("../examples/ca/hudsucker.key");
30 | let ca_cert: &[u8] = include_bytes!("../examples/ca/hudsucker.cer");
31 | let private_key = PKey::private_key_from_pem(private_key).expect("Failed to parse private key");
32 | let ca_cert = X509::from_pem(ca_cert).expect("Failed to parse CA certificate");
33 |
34 | OpensslAuthority::new(
35 | private_key,
36 | ca_cert,
37 | MessageDigest::sha256(),
38 | cache_size,
39 | aws_lc_rs::default_provider(),
40 | )
41 | }
42 |
43 | fn compare_cas(c: &mut Criterion) {
44 | let rcgen_ca = build_rcgen_ca(0);
45 | let openssl_ca = build_openssl_ca(0);
46 | let authority = Authority::from_static("example.com");
47 | let runtime = runtime();
48 |
49 | let mut group = c.benchmark_group("cas");
50 | group.bench_function("rcgen", |b| {
51 | b.to_async(&runtime)
52 | .iter(|| rcgen_ca.gen_server_config(black_box(&authority)))
53 | });
54 | group.bench_function("openssl", |b| {
55 | b.to_async(&runtime)
56 | .iter(|| openssl_ca.gen_server_config(black_box(&authority)))
57 | });
58 | group.finish();
59 | }
60 |
61 | fn rcgen_ca(c: &mut Criterion) {
62 | let cache_ca = build_rcgen_ca(1000);
63 | let no_cache_ca = build_rcgen_ca(0);
64 | let authority = Authority::from_static("example.com");
65 | let runtime = runtime();
66 |
67 | let mut group = c.benchmark_group("rcgen ca");
68 | group.bench_function("with cache", |b| {
69 | b.to_async(&runtime)
70 | .iter(|| cache_ca.gen_server_config(black_box(&authority)))
71 | });
72 | group.bench_function("without cache", |b| {
73 | b.to_async(&runtime)
74 | .iter(|| no_cache_ca.gen_server_config(black_box(&authority)))
75 | });
76 | group.finish();
77 | }
78 |
79 | fn openssl_ca(c: &mut Criterion) {
80 | let cache_ca = build_openssl_ca(1000);
81 | let no_cache_ca = build_openssl_ca(0);
82 | let authority = Authority::from_static("example.com");
83 | let runtime = runtime();
84 |
85 | let mut group = c.benchmark_group("openssl ca");
86 | group.bench_function("with cache", |b| {
87 | b.to_async(&runtime)
88 | .iter(|| cache_ca.gen_server_config(black_box(&authority)))
89 | });
90 | group.bench_function("without cache", |b| {
91 | b.to_async(&runtime)
92 | .iter(|| no_cache_ca.gen_server_config(black_box(&authority)))
93 | });
94 | group.finish();
95 | }
96 |
97 | criterion_group!(benches, compare_cas, rcgen_ca, openssl_ca);
98 | criterion_main!(benches);
99 |
--------------------------------------------------------------------------------
/benches/decoder.rs:
--------------------------------------------------------------------------------
1 | use async_compression::tokio::bufread::{BrotliEncoder, GzipEncoder};
2 | use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
3 | use hudsucker::{
4 | Body, decode_request, decode_response,
5 | hyper::{
6 | Request, Response,
7 | header::{CONTENT_ENCODING, CONTENT_LENGTH},
8 | },
9 | };
10 | use tokio::io::BufReader;
11 | use tokio_util::io::ReaderStream;
12 |
13 | const BODY: &[u8; 12] = b"Hello, World";
14 |
15 | fn raw_body() -> Body {
16 | Body::from(&BODY[..])
17 | }
18 |
19 | fn gzip_body() -> Body {
20 | let encoder = GzipEncoder::new(&BODY[..]);
21 | Body::from_stream(ReaderStream::new(encoder))
22 | }
23 |
24 | fn gzip_brotli_body() -> Body {
25 | let encoder = GzipEncoder::new(&BODY[..]);
26 | let encoder = BrotliEncoder::new(BufReader::new(encoder));
27 | Body::from_stream(ReaderStream::new(encoder))
28 | }
29 |
30 | fn raw_request() -> Request<Body> {
31 | Request::builder()
32 | .header(CONTENT_LENGTH, BODY.len())
33 | .body(raw_body())
34 | .unwrap()
35 | }
36 |
37 | fn gzip_request() -> Request<Body> {
38 | Request::builder()
39 | .header(CONTENT_LENGTH, 123)
40 | .header(CONTENT_ENCODING, "gzip")
41 | .body(gzip_body())
42 | .unwrap()
43 | }
44 |
45 | fn gzip_brotli_request() -> Request<Body> {
46 | Request::builder()
47 | .header(CONTENT_LENGTH, 123)
48 | .header(CONTENT_ENCODING, "gzip, br")
49 | .body(gzip_brotli_body())
50 | .unwrap()
51 | }
52 |
53 | fn raw_response() -> Response<Body> {
54 | Response::builder()
55 | .header(CONTENT_LENGTH, BODY.len())
56 | .body(raw_body())
57 | .unwrap()
58 | }
59 |
60 | fn gzip_response() -> Response<Body> {
61 | Response::builder()
62 | .header(CONTENT_LENGTH, 123)
63 | .header(CONTENT_ENCODING, "gzip")
64 | .body(gzip_body())
65 | .unwrap()
66 | }
67 |
68 | fn gzip_brotli_response() -> Response<Body> {
69 | Response::builder()
70 | .header(CONTENT_LENGTH, BODY.len())
71 | .header(CONTENT_ENCODING, "gzip, br")
72 | .body(gzip_brotli_body())
73 | .unwrap()
74 | }
75 |
76 | fn bench_decode_request(c: &mut Criterion) {
77 | let mut group = c.benchmark_group("decode_request");
78 | group.bench_function("raw", |b| {
79 | b.iter_batched(raw_request, decode_request, BatchSize::SmallInput)
80 | });
81 | group.bench_function("gzip", |b| {
82 | b.iter_batched(gzip_request, decode_request, BatchSize::SmallInput)
83 | });
84 | group.bench_function("gzip, br", |b| {
85 | b.iter_batched(gzip_brotli_request, decode_request, BatchSize::SmallInput)
86 | });
87 | group.finish();
88 | }
89 |
90 | fn bench_decode_response(c: &mut Criterion) {
91 | let mut group = c.benchmark_group("decode_response");
92 | group.bench_function("raw", |b| {
93 | b.iter_batched(raw_response, decode_response, BatchSize::SmallInput)
94 | });
95 | group.bench_function("gzip", |b| {
96 | b.iter_batched(gzip_response, decode_response, BatchSize::SmallInput)
97 | });
98 | group.bench_function("gzip, br", |b| {
99 | b.iter_batched(gzip_brotli_response, decode_response, BatchSize::SmallInput)
100 | });
101 | group.finish();
102 | }
103 |
104 | criterion_group!(benches, bench_decode_request, bench_decode_response);
105 | criterion_main!(benches);
106 |
--------------------------------------------------------------------------------
/benches/proxy.rs:
--------------------------------------------------------------------------------
1 | use criterion::{Criterion, Throughput, criterion_group, criterion_main};
2 | use hudsucker::{
3 | Body, Proxy,
4 | certificate_authority::{CertificateAuthority, RcgenAuthority},
5 | hyper::{Method, Request, Response, body::Incoming, service::service_fn},
6 | hyper_util::{
7 | client::legacy::{Client, connect::HttpConnector},
8 | rt::{TokioExecutor, TokioIo},
9 | server::conn::auto,
10 | },
11 | rcgen::{CertificateParams, KeyPair},
12 | rustls::crypto::aws_lc_rs,
13 | };
14 | use reqwest::Certificate;
15 | use std::{convert::Infallible, net::SocketAddr};
16 | use tokio::{net::TcpListener, sync::oneshot::Sender};
17 | use tokio_graceful::Shutdown;
18 | use tokio_native_tls::native_tls;
19 |
20 | fn runtime() -> tokio::runtime::Runtime {
21 | tokio::runtime::Builder::new_current_thread()
22 | .enable_io()
23 | .enable_time()
24 | .build()
25 | .unwrap()
26 | }
27 |
28 | fn build_ca() -> RcgenAuthority {
29 | let key_pair = include_str!("../examples/ca/hudsucker.key");
30 | let ca_cert = include_str!("../examples/ca/hudsucker.cer");
31 | let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
32 | let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
33 | .expect("Failed to parse CA certificate")
34 | .self_signed(&key_pair)
35 | .expect("Failed to sign CA certificate");
36 |
37 | RcgenAuthority::new(key_pair, ca_cert, 1000, aws_lc_rs::default_provider())
38 | }
39 |
40 | async fn test_server(req: Request<Incoming>) -> Result<Response<Body>, Infallible> {
41 | match (req.method(), req.uri().path()) {
42 | (&Method::GET, "/hello") => Ok(Response::new(Body::from("hello, world"))),
43 | _ => Ok(Response::new(Body::empty())),
44 | }
45 | }
46 |
47 | pub async fn start_http_server() -> Result<(SocketAddr, Sender<()>), Box<dyn std::error::Error>> {
48 | let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await?;
49 | let addr = listener.local_addr()?;
50 | let (tx, rx) = tokio::sync::oneshot::channel();
51 |
52 | tokio::spawn(async move {
53 | let server = auto::Builder::new(TokioExecutor::new());
54 | let shutdown = Shutdown::new(async { rx.await.unwrap_or_default() });
55 | let guard = shutdown.guard_weak();
56 |
57 | loop {
58 | tokio::select! {
59 | res = listener.accept() => {
60 | let (tcp, _) = res.unwrap();
61 | let server = server.clone();
62 |
63 | shutdown.spawn_task(async move {
64 | server
65 | .serve_connection_with_upgrades(TokioIo::new(tcp), service_fn(test_server))
66 | .await
67 | .unwrap();
68 | });
69 | }
70 | _ = guard.cancelled() => {
71 | break;
72 | }
73 | }
74 | }
75 |
76 | shutdown.shutdown().await;
77 | });
78 |
79 | Ok((addr, tx))
80 | }
81 |
82 | pub async fn start_https_server(
83 | ca: impl CertificateAuthority,
84 | ) -> Result<(SocketAddr, Sender<()>), Box<dyn std::error::Error>> {
85 | let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await?;
86 | let addr = listener.local_addr()?;
87 | let acceptor: tokio_rustls::TlsAcceptor = ca
88 | .gen_server_config(&"localhost".parse().unwrap())
89 | .await
90 | .into();
91 | let (tx, rx) = tokio::sync::oneshot::channel();
92 |
93 | tokio::spawn(async move {
94 | let server = auto::Builder::new(TokioExecutor::new());
95 | let shutdown = Shutdown::new(async { rx.await.unwrap_or_default() });
96 | let guard = shutdown.guard_weak();
97 |
98 | loop {
99 | tokio::select! {
100 | res = listener.accept() => {
101 | let (tcp, _) = res.unwrap();
102 | let tcp = acceptor.accept(tcp).await.unwrap();
103 | let server = server.clone();
104 |
105 | shutdown.spawn_task(async move {
106 | server
107 | .serve_connection_with_upgrades(TokioIo::new(tcp), service_fn(test_server))
108 | .await
109 | .unwrap();
110 | });
111 | }
112 | _ = guard.cancelled() => {
113 | break;
114 | }
115 | }
116 | }
117 |
118 | shutdown.shutdown().await;
119 | });
120 |
121 | Ok((addr, tx))
122 | }
123 |
124 | fn native_tls_client() -> Client<hyper_tls::HttpsConnector<HttpConnector>, Body> {
125 | let mut http = HttpConnector::new();
126 | http.enforce_http(false);
127 | let ca_cert =
128 | native_tls::Certificate::from_pem(include_bytes!("../examples/ca/hudsucker.cer")).unwrap();
129 |
130 | let tls = native_tls::TlsConnector::builder()
131 | .add_root_certificate(ca_cert)
132 | .build()
133 | .unwrap()
134 | .into();
135 |
136 | let https = (http, tls).into();
137 |
138 | Client::builder(TokioExecutor::new()).build(https)
139 | }
140 |
141 | async fn start_proxy(
142 | ca: impl CertificateAuthority,
143 | ) -> Result<(SocketAddr, Sender<()>), Box<dyn std::error::Error>> {
144 | let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await?;
145 | let addr = listener.local_addr()?;
146 | let (tx, rx) = tokio::sync::oneshot::channel();
147 | let proxy = Proxy::builder()
148 | .with_listener(listener)
149 | .with_ca(ca)
150 | .with_client(native_tls_client())
151 | .with_graceful_shutdown(async {
152 | rx.await.unwrap_or_default();
153 | })
154 | .build()
155 | .expect("Failed to create proxy");
156 |
157 | tokio::spawn(proxy.start());
158 |
159 | Ok((addr, tx))
160 | }
161 |
162 | fn build_client() -> reqwest::Client {
163 | let ca_cert = Certificate::from_pem(include_bytes!("../examples/ca/hudsucker.cer")).unwrap();
164 |
165 | reqwest::Client::builder()
166 | .add_root_certificate(ca_cert)
167 | .build()
168 | .unwrap()
169 | }
170 |
171 | fn build_proxied_client(proxy: &str) -> reqwest::Client {
172 | let proxy = reqwest::Proxy::all(proxy).unwrap();
173 | let ca_cert = Certificate::from_pem(include_bytes!("../examples/ca/hudsucker.cer")).unwrap();
174 |
175 | reqwest::Client::builder()
176 | .proxy(proxy)
177 | .add_root_certificate(ca_cert)
178 | .build()
179 | .unwrap()
180 | }
181 |
182 | fn bench_local(c: &mut Criterion) {
183 | let runtime = runtime();
184 | let _guard = runtime.enter();
185 |
186 | let (proxy_addr, stop_proxy) = runtime.block_on(start_proxy(build_ca())).unwrap();
187 | let (http_addr, stop_http) = runtime.block_on(start_http_server()).unwrap();
188 | let (https_addr, stop_https) = runtime.block_on(start_https_server(build_ca())).unwrap();
189 | let client = build_client();
190 | let proxied_client = build_proxied_client(&proxy_addr.to_string());
191 |
192 | let mut group = c.benchmark_group("proxy local site");
193 | group.throughput(Throughput::Elements(1));
194 | group.bench_function("HTTP without proxy", |b| {
195 | b.to_async(&runtime).iter(|| async {
196 | client
197 | .get(format!("http://{}/hello", http_addr))
198 | .send()
199 | .await
200 | .unwrap()
201 | })
202 | });
203 | group.bench_function("HTTP with proxy", |b| {
204 | b.to_async(&runtime).iter(|| async {
205 | proxied_client
206 | .get(format!("http://{}/hello", http_addr))
207 | .send()
208 | .await
209 | .unwrap()
210 | })
211 | });
212 | group.bench_function("HTTPS without proxy", |b| {
213 | b.to_async(&runtime).iter(|| async {
214 | client
215 | .get(format!("https://localhost:{}/hello", https_addr.port()))
216 | .send()
217 | .await
218 | .unwrap()
219 | })
220 | });
221 | group.bench_function("HTTPS with proxy", |b| {
222 | b.to_async(&runtime).iter(|| async {
223 | proxied_client
224 | .get(format!("https://localhost:{}/hello", https_addr.port()))
225 | .send()
226 | .await
227 | .unwrap()
228 | })
229 | });
230 | group.finish();
231 |
232 | stop_http.send(()).unwrap();
233 | stop_https.send(()).unwrap();
234 | stop_proxy.send(()).unwrap();
235 | }
236 |
237 | fn bench_remote(c: &mut Criterion) {
238 | let runtime = runtime();
239 | let _guard = runtime.enter();
240 |
241 | let (proxy_addr, stop_proxy) = runtime.block_on(start_proxy(build_ca())).unwrap();
242 | let client = build_client();
243 | let proxied_client = build_proxied_client(&proxy_addr.to_string());
244 |
245 | let mut group = c.benchmark_group("proxy remote site");
246 | group.throughput(Throughput::Elements(1));
247 | group.bench_function("HTTP without proxy", |b| {
248 | b.to_async(&runtime)
249 | .iter(|| async { client.get("http://echo.omjad.as").send().await.unwrap() })
250 | });
251 | group.bench_function("HTTP with proxy", |b| {
252 | b.to_async(&runtime).iter(|| async {
253 | proxied_client
254 | .get("http://echo.omjad.as")
255 | .send()
256 | .await
257 | .unwrap()
258 | })
259 | });
260 | group.bench_function("HTTPS without proxy", |b| {
261 | b.to_async(&runtime)
262 | .iter(|| async { client.get("https://echo.omjad.as").send().await.unwrap() })
263 | });
264 | group.bench_function("HTTPS with proxy", |b| {
265 | b.to_async(&runtime).iter(|| async {
266 | proxied_client
267 | .get("https://echo.omjad.as")
268 | .send()
269 | .await
270 | .unwrap()
271 | })
272 | });
273 | group.finish();
274 |
275 | let _ = stop_proxy.send(());
276 | }
277 |
278 | criterion_group!(benches, bench_local, bench_remote);
279 | criterion_main!(benches);
280 |
--------------------------------------------------------------------------------
/examples/ca/hudsucker.cer:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDkzCCAnugAwIBAgIJAMdLw5xpuf6yMA0GCSqGSIb3DQEBCwUAMGYxHTAbBgNV
3 | BAMMFEh1ZHN1Y2tlciBJbmR1c3RyaWVzMR0wGwYDVQQKDBRIdWRzdWNrZXIgSW5k
4 | dXN0cmllczELMAkGA1UEBgwCVVMxCzAJBgNVBAgMAk5ZMQwwCgYDVQQHDANOWUMw
5 | IBcNNzUwMTAxMDAwMDAwWhgPNDA5NjAxMDEwMDAwMDBaMGYxHTAbBgNVBAMMFEh1
6 | ZHN1Y2tlciBJbmR1c3RyaWVzMR0wGwYDVQQKDBRIdWRzdWNrZXIgSW5kdXN0cmll
7 | czELMAkGA1UEBgwCVVMxCzAJBgNVBAgMAk5ZMQwwCgYDVQQHDANOWUMwggEiMA0G
8 | CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCaHrZ9VUL7SKqgJdeG/dciikuummsr
9 | Ds4Cn+p01J5mURQ/bq62EU5IiWFsM0LgwkO0UlTGE9gU+U1w1GeDxlDeVqcwitx+
10 | sPjup59ybC2l/iNihETjrLuFUaZVFH/eqdXqNgiRf1fBHV/RD27BlPN6OPo8Z6qT
11 | fuRlVAUbpj+IViIkdQtmymgCJDrQabtnnwpPqZA0MQFnsAvmelOrWyTX1ASjzFAl
12 | WWq7JDvFsrYcqPeMv8QaN/teESFN5pMSNDbvYm2guwcRu4jVL75hMezqm6StXe5p
13 | CbbZOKPHLGqtRUAHnUFHEV1khOEz5HhOsC6m0DvWvKf5QiUvqcYFYxFZAgMBAAGj
14 | QjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUsv65aZzDS8dPy3NWpXkAOKf0
15 | 2rMwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVnXJDTCCcV/c
16 | 79IidGy3Hh/st+4e2A6R3YueE01Rwo340Asp9Tp3IewDQcF3oRosgDp/i9daRrxv
17 | c2q76CNmo57qUSjbdyu4o5SDqj7lmr263YgM4ZnVOQR9CaWwCL21C65tpgHa8Grm
18 | hNil9REdnpM7br4H0yeX2nFjOYI8sUguxNle3ojTFLl0sWXZIPJE/koEaaHGSJD1
19 | XR72llJbbExYbTzaEV3uw7sJsuwldMC/QL+oWm/Jnwc2WfLTl3HjLOaK9r/smF/E
20 | RtYk5yo7J6pMALrIP7SPHpFooez5JHn2ucP42HcUwOXmrDIOUt6gJQ4w8DBE46Bo
21 | oMKSHK2k0g==
22 | -----END CERTIFICATE-----
23 |
--------------------------------------------------------------------------------
/examples/ca/hudsucker.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCaHrZ9VUL7SKqg
3 | JdeG/dciikuummsrDs4Cn+p01J5mURQ/bq62EU5IiWFsM0LgwkO0UlTGE9gU+U1w
4 | 1GeDxlDeVqcwitx+sPjup59ybC2l/iNihETjrLuFUaZVFH/eqdXqNgiRf1fBHV/R
5 | D27BlPN6OPo8Z6qTfuRlVAUbpj+IViIkdQtmymgCJDrQabtnnwpPqZA0MQFnsAvm
6 | elOrWyTX1ASjzFAlWWq7JDvFsrYcqPeMv8QaN/teESFN5pMSNDbvYm2guwcRu4jV
7 | L75hMezqm6StXe5pCbbZOKPHLGqtRUAHnUFHEV1khOEz5HhOsC6m0DvWvKf5QiUv
8 | qcYFYxFZAgMBAAECggEARsXZyV4w3xG0gMw/19aTR2I4dNqmYeRvh9cFpFbK0nNj
9 | F+nswuDZkQe9PCGiEXJEAvdXxInyTVdaT3jKfEHCewdRyUHLFUaRWY6R8spof/Rf
10 | LWtN8zsr9YHUHvfF7GsTN3VOo/nVQ3IIwQkUNEMBN9wYVUrJkufPXBSkL9k9DY7B
11 | uoUghTbpj2nZhSQYRWyAkbc5CcpJzuBDhSE8m2X+0ZMuoW91rOw+G2OLlBwiIOFg
12 | PHPiS0wAMyKycmDnCwm4NDu8BK/I6ucORfFlCyroh63riz/BzJo6FFrzRTd70pHG
13 | 2VwAo+58LkRYkJjW7gJZ0XzoAxEKntsIYf8r25jzXQKBgQDHq8BOTDGkEtUPR8PE
14 | ICKUFck3fHRlI39Sv2P2JBoPgGP9zy2xyycV/Uc3+tbNnmkUrTFPIWTQSorb0i4R
15 | fhg/3Jqtm2NUb1lrLo9oEDEqRgCSLbLYO1zLFD42s7dW7DOEr+q/54pcAh/xVrDB
16 | L4CSnuWpkGB5PiuTugaJB8TOAwKBgQDFmUUMosNxofUFE5HNYX3+N0rMNX4XgXG7
17 | +TS36XJrKo7PQMTnqD5VUr7Iii/1Ncmr/dANSTUzRErkTjm9gbHllakDqy0rTweB
18 | EtskWoN2s45g+i7xocMVNSkLWDd03NzyMM6riArRz2hkWeTSkJLn+AmFm+q+cFHj
19 | yL+6Nx+CcwKBgQCgT4tE0fQBMYWSkSHic5KPprY5MFkbYta1Dyko1G+ABqtBenfL
20 | ibpF82ac0W5pBEiF60/tongYq+C1ARkvvjel/m7J+DpV7liyr11ARc/TiwSmWL6A
21 | 0Zh9DDGvJbeLuHTckYk+rp3tpV8UG3AqiwMFtUHbVCnA7mN6Zh8dIfmnFQKBgH0q
22 | 66xnZfqTJwxCKze4LAFesQjOUcM+AfeakqR1Qj9URAZQ9unvjxypP6T0tBBWNBu4
23 | uZPQ7dw9xFr+mmDKyQ+vT9K9Ge23L/+5HAvZMjF86BHSKO5zE4pZlFhVVzu1tFfO
24 | RvwtPv1Mrsnyj5o6bnR2kEGMVJSxvY3W2mxxAoq1AoGAT2sgBDO0CFydMsrxZPno
25 | YYu8LfNefE2KuVFcoLxiseRK/OHOzw7RtSOa0ZW0omXnawwMEje+LavYCO2GerdT
26 | cwJvbfFUqX1F3FqnbyE7vOFkQnzDboLZuw7kY8JeqEdP34CniG1bX4o3Ni2FrAb/
27 | 01/t/macxPNVtcpvKsABzJY=
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/examples/log.rs:
--------------------------------------------------------------------------------
1 | use hudsucker::{
2 | certificate_authority::RcgenAuthority,
3 | hyper::{Request, Response},
4 | rcgen::{CertificateParams, KeyPair},
5 | rustls::crypto::aws_lc_rs,
6 | tokio_tungstenite::tungstenite::Message,
7 | *,
8 | };
9 | use std::net::SocketAddr;
10 | use tracing::*;
11 |
12 | async fn shutdown_signal() {
13 | tokio::signal::ctrl_c()
14 | .await
15 | .expect("Failed to install CTRL+C signal handler");
16 | }
17 |
18 | #[derive(Clone)]
19 | struct LogHandler;
20 |
21 | impl HttpHandler for LogHandler {
22 | async fn handle_request(
23 | &mut self,
24 | _ctx: &HttpContext,
26 | req: Request<Body>,
26 | ) -> RequestOrResponse {
27 | println!("{:?}", req);
28 | req.into()
29 | }
30 |
31 | async fn handle_response(&mut self, _ctx: &HttpContext, res: Response<Body>) -> Response<Body> {
32 | println!("{:?}", res);
33 | res
34 | }
35 | }
36 |
37 | impl WebSocketHandler for LogHandler {
38 | async fn handle_message(&mut self, _ctx: &WebSocketContext, msg: Message) -> Option<Message> {
39 | println!("{:?}", msg);
40 | Some(msg)
41 | }
42 | }
43 |
44 | #[tokio::main]
45 | async fn main() {
46 | tracing_subscriber::fmt::init();
47 |
48 | let key_pair = include_str!("ca/hudsucker.key");
49 | let ca_cert = include_str!("ca/hudsucker.cer");
50 | let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
51 | let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
52 | .expect("Failed to parse CA certificate")
53 | .self_signed(&key_pair)
54 | .expect("Failed to sign CA certificate");
55 |
56 | let ca = RcgenAuthority::new(key_pair, ca_cert, 1_000, aws_lc_rs::default_provider());
57 |
58 | let proxy = Proxy::builder()
59 | .with_addr(SocketAddr::from(([127, 0, 0, 1], 3000)))
60 | .with_ca(ca)
61 | .with_rustls_client(aws_lc_rs::default_provider())
62 | .with_http_handler(LogHandler)
63 | .with_websocket_handler(LogHandler)
64 | .with_graceful_shutdown(shutdown_signal())
65 | .build()
66 | .expect("Failed to create proxy");
67 |
68 | if let Err(e) = proxy.start().await {
69 | error!("{}", e);
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/examples/noop.rs:
--------------------------------------------------------------------------------
1 | use hudsucker::{
2 | certificate_authority::RcgenAuthority,
3 | rcgen::{CertificateParams, KeyPair},
4 | rustls::crypto::aws_lc_rs,
5 | *,
6 | };
7 | use std::net::SocketAddr;
8 | use tracing::*;
9 |
10 | async fn shutdown_signal() {
11 | tokio::signal::ctrl_c()
12 | .await
13 | .expect("Failed to install CTRL+C signal handler");
14 | }
15 |
16 | #[tokio::main]
17 | async fn main() {
18 | tracing_subscriber::fmt::init();
19 |
20 | let key_pair = include_str!("ca/hudsucker.key");
21 | let ca_cert = include_str!("ca/hudsucker.cer");
22 | let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
23 | let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
24 | .expect("Failed to parse CA certificate")
25 | .self_signed(&key_pair)
26 | .expect("Failed to sign CA certificate");
27 |
28 | let ca = RcgenAuthority::new(key_pair, ca_cert, 1_000, aws_lc_rs::default_provider());
29 |
30 | let proxy = Proxy::builder()
31 | .with_addr(SocketAddr::from(([127, 0, 0, 1], 3000)))
32 | .with_ca(ca)
33 | .with_rustls_client(aws_lc_rs::default_provider())
34 | .with_graceful_shutdown(shutdown_signal())
35 | .build()
36 | .expect("Failed to create proxy");
37 |
38 | if let Err(e) = proxy.start().await {
39 | error!("{}", e);
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/examples/openssl.rs:
--------------------------------------------------------------------------------
1 | use hudsucker::{
2 | certificate_authority::OpensslAuthority,
3 | hyper::{Request, Response},
4 | openssl::{hash::MessageDigest, pkey::PKey, x509::X509},
5 | rustls::crypto::aws_lc_rs,
6 | tokio_tungstenite::tungstenite::Message,
7 | *,
8 | };
9 | use std::net::SocketAddr;
10 | use tracing::*;
11 |
12 | async fn shutdown_signal() {
13 | tokio::signal::ctrl_c()
14 | .await
15 | .expect("Failed to install CTRL+C signal handler");
16 | }
17 |
18 | #[derive(Clone)]
19 | struct LogHandler;
20 |
21 | impl HttpHandler for LogHandler {
22 | async fn handle_request(
23 | &mut self,
24 | _ctx: &HttpContext,
25 | req: Request<Body>,
26 | ) -> RequestOrResponse {
27 | println!("{:?}", req);
28 | req.into()
29 | }
30 |
31 | async fn handle_response(&mut self, _ctx: &HttpContext, res: Response<Body>) -> Response<Body> {
32 | println!("{:?}", res);
33 | res
34 | }
35 | }
36 |
37 | impl WebSocketHandler for LogHandler {
38 | async fn handle_message(&mut self, _ctx: &WebSocketContext, msg: Message) -> Option<Message> {
39 | println!("{:?}", msg);
40 | Some(msg)
41 | }
42 | }
43 |
44 | #[tokio::main]
45 | async fn main() {
46 | tracing_subscriber::fmt::init();
47 |
48 | let private_key_bytes: &[u8] = include_bytes!("ca/hudsucker.key");
49 | let ca_cert_bytes: &[u8] = include_bytes!("ca/hudsucker.cer");
50 | let private_key =
51 | PKey::private_key_from_pem(private_key_bytes).expect("Failed to parse private key");
52 | let ca_cert = X509::from_pem(ca_cert_bytes).expect("Failed to parse CA certificate");
53 |
54 | let ca = OpensslAuthority::new(
55 | private_key,
56 | ca_cert,
57 | MessageDigest::sha256(),
58 | 1_000,
59 | aws_lc_rs::default_provider(),
60 | );
61 |
62 | let proxy = Proxy::builder()
63 | .with_addr(SocketAddr::from(([127, 0, 0, 1], 3000)))
64 | .with_ca(ca)
65 | .with_rustls_client(aws_lc_rs::default_provider())
66 | .with_http_handler(LogHandler)
67 | .with_graceful_shutdown(shutdown_signal())
68 | .build()
69 | .expect("Failed to create proxy");
70 |
71 | if let Err(e) = proxy.start().await {
72 | error!("{}", e);
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | format_code_in_doc_comments = true
2 | imports_granularity = "Crate"
3 | newline_style = "Unix"
4 |
--------------------------------------------------------------------------------
/src/body.rs:
--------------------------------------------------------------------------------
1 | use crate::Error;
2 | use futures::{Stream, TryStream, TryStreamExt};
3 | use http_body_util::{Collected, Empty, Full, StreamBody, combinators::BoxBody};
4 | use hyper::{
5 | Request, Response,
6 | body::{Body as HttpBody, Bytes, Frame, Incoming, SizeHint},
7 | };
8 | use std::{pin::Pin, task::Poll};
9 |
10 | #[derive(Debug)]
11 | enum Internal {
12 | BoxBody(BoxBody<Bytes, Error>),
13 | Collected(Collected<Bytes>),
14 | Empty(Empty<Bytes>),
15 | Full(Full<Bytes>),
16 | Incoming(Incoming),
17 | String(String),
18 | }
19 |
20 | /// Concrete implementation of [`Body`](HttpBody).
21 | #[derive(Debug)]
22 | pub struct Body {
23 | inner: Internal,
24 | }
25 |
26 | impl Body {
27 | pub fn empty() -> Self {
28 | Self::from(Empty::new())
29 | }
30 |
31 | pub fn from_stream<S>(stream: S) -> Self
32 | where
33 | S: TryStream + Send + Sync + 'static,
34 | S::Ok: Into<Bytes>,
35 | S::Error: Into<Error>,
36 | {
37 | Self {
38 | inner: Internal::BoxBody(BoxBody::new(StreamBody::new(
39 | stream
40 | .map_ok(Into::into)
41 | .map_ok(Frame::data)
42 | .map_err(Into::into),
43 | ))),
44 | }
45 | }
46 | }
47 |
48 | impl HttpBody for Body {
49 | type Data = Bytes;
50 | type Error = Error;
51 |
52 | fn poll_frame(
53 | mut self: Pin<&mut Self>,
54 | cx: &mut std::task::Context<'_>,
55 | ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
56 | match &mut self.inner {
57 | Internal::BoxBody(body) => Pin::new(body).poll_frame(cx),
58 | Internal::Collected(body) => Pin::new(body).poll_frame(cx).map_err(|e| match e {}),
59 | Internal::Empty(body) => Pin::new(body).poll_frame(cx).map_err(|e| match e {}),
60 | Internal::Full(body) => Pin::new(body).poll_frame(cx).map_err(|e| match e {}),
61 | Internal::Incoming(body) => Pin::new(body).poll_frame(cx).map_err(Error::from),
62 | Internal::String(body) => Pin::new(body).poll_frame(cx).map_err(|e| match e {}),
63 | }
64 | }
65 |
66 | fn is_end_stream(&self) -> bool {
67 | match &self.inner {
68 | Internal::BoxBody(body) => body.is_end_stream(),
69 | Internal::Collected(body) => body.is_end_stream(),
70 | Internal::Empty(body) => body.is_end_stream(),
71 | Internal::Full(body) => body.is_end_stream(),
72 | Internal::Incoming(body) => body.is_end_stream(),
73 | Internal::String(body) => body.is_end_stream(),
74 | }
75 | }
76 |
77 | fn size_hint(&self) -> SizeHint {
78 | match &self.inner {
79 | Internal::BoxBody(body) => body.size_hint(),
80 | Internal::Collected(body) => body.size_hint(),
81 | Internal::Empty(body) => body.size_hint(),
82 | Internal::Full(body) => body.size_hint(),
83 | Internal::Incoming(body) => body.size_hint(),
84 | Internal::String(body) => body.size_hint(),
85 | }
86 | }
87 | }
88 |
89 | impl From<BoxBody<Bytes, Error>> for Body {
90 | fn from(value: BoxBody<Bytes, Error>) -> Self {
91 | Self {
92 | inner: Internal::BoxBody(value),
93 | }
94 | }
95 | }
96 |
97 | impl From<Collected<Bytes>> for Body {
98 | fn from(value: Collected<Bytes>) -> Self {
99 | Self {
100 | inner: Internal::Collected(value),
101 | }
102 | }
103 | }
104 |
105 | impl From<Empty<Bytes>> for Body {
106 | fn from(value: Empty<Bytes>) -> Self {
107 | Self {
108 | inner: Internal::Empty(value),
109 | }
110 | }
111 | }
112 |
113 | impl From<Full<Bytes>> for Body {
114 | fn from(value: Full<Bytes>) -> Self {
115 | Self {
116 | inner: Internal::Full(value),
117 | }
118 | }
119 | }
120 |
121 | impl From for Body {
122 | fn from(value: Incoming) -> Self {
123 | Self {
124 | inner: Internal::Incoming(value),
125 | }
126 | }
127 | }
128 |
129 | impl<S> From<StreamBody<S>> for Body
130 | where
131 | S: Stream<Item = Result<Frame<Bytes>, Error>> + Send + Sync + 'static,
132 | {
133 | fn from(value: StreamBody<S>) -> Self {
134 | Self {
135 | inner: Internal::BoxBody(BoxBody::new(value)),
136 | }
137 | }
138 | }
139 |
140 | impl From for Body {
141 | fn from(value: String) -> Self {
142 | Self {
143 | inner: Internal::String(value),
144 | }
145 | }
146 | }
147 |
148 | impl From<&'static str> for Body {
149 | fn from(value: &'static str) -> Self {
150 | Self {
151 | inner: Internal::Full(Full::new(Bytes::from_static(value.as_bytes()))),
152 | }
153 | }
154 | }
155 |
156 | impl From<&'static [u8]> for Body {
157 | fn from(value: &'static [u8]) -> Self {
158 | Self {
159 | inner: Internal::Full(Full::new(Bytes::from_static(value))),
160 | }
161 | }
162 | }
163 |
164 | impl<T> From<Request<T>> for Body
165 | where
166 | T: Into<Body>,
167 | {
168 | fn from(value: Request<T>) -> Self {
169 | value.into_body().into()
170 | }
171 | }
172 |
173 | impl<T> From<Response<T>> for Body
174 | where
175 | T: Into<Body>,
176 | {
177 | fn from(value: Response<T>) -> Self {
178 | value.into_body().into()
179 | }
180 | }
181 |
--------------------------------------------------------------------------------
/src/certificate_authority/mod.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "openssl-ca")]
2 | mod openssl_authority;
3 | #[cfg(feature = "rcgen-ca")]
4 | mod rcgen_authority;
5 |
6 | use http::uri::Authority;
7 | use std::sync::Arc;
8 | use tokio_rustls::rustls::ServerConfig;
9 |
10 | #[cfg(feature = "openssl-ca")]
11 | pub use openssl_authority::*;
12 | #[cfg(feature = "rcgen-ca")]
13 | pub use rcgen_authority::*;
14 |
15 | const TTL_SECS: i64 = 365 * 24 * 60 * 60;
16 | const CACHE_TTL: u64 = TTL_SECS as u64 / 2;
17 | const NOT_BEFORE_OFFSET: i64 = 60;
18 |
19 | /// Issues certificates for use when communicating with clients.
20 | ///
21 | /// Clients should be configured to either trust the provided root certificate, or to ignore
22 | /// certificate errors.
23 | pub trait CertificateAuthority: Send + Sync + 'static {
24 | /// Generate ServerConfig for use with rustls.
25 | fn gen_server_config(
26 | &self,
27 | authority: &Authority,
28 | ) -> impl Future<Output = Arc<ServerConfig>> + Send;
29 | }
30 |
--------------------------------------------------------------------------------
/src/certificate_authority/openssl_authority.rs:
--------------------------------------------------------------------------------
1 | use crate::certificate_authority::{CACHE_TTL, CertificateAuthority, NOT_BEFORE_OFFSET, TTL_SECS};
2 | use http::uri::Authority;
3 | use moka::future::Cache;
4 | use openssl::{
5 | asn1::{Asn1Integer, Asn1Time},
6 | bn::BigNum,
7 | error::ErrorStack,
8 | hash::MessageDigest,
9 | pkey::{PKey, Private},
10 | rand,
11 | x509::{X509, X509Builder, X509NameBuilder, extension::SubjectAlternativeName},
12 | };
13 | use std::{
14 | sync::Arc,
15 | time::{Duration, SystemTime},
16 | };
17 | use tokio_rustls::rustls::{
18 | ServerConfig,
19 | crypto::CryptoProvider,
20 | pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer},
21 | };
22 | use tracing::debug;
23 |
24 | /// Issues certificates for use when communicating with clients.
25 | ///
26 | /// Issues certificates for communicating with clients over TLS. Certificates are cached in memory
27 | /// up to a max size that is provided when creating the authority. Certificates are generated using
28 | /// the `openssl` crate.
29 | ///
30 | /// # Examples
31 | ///
32 | /// ```rust
33 | /// use hudsucker::{
34 | /// certificate_authority::OpensslAuthority,
35 | /// openssl::{hash::MessageDigest, pkey::PKey, x509::X509},
36 | /// rustls::crypto::aws_lc_rs,
37 | /// };
38 | ///
39 | /// let private_key_bytes: &[u8] = include_bytes!("../../examples/ca/hudsucker.key");
40 | /// let ca_cert_bytes: &[u8] = include_bytes!("../../examples/ca/hudsucker.cer");
41 | /// let private_key = PKey::private_key_from_pem(private_key_bytes).unwrap();
42 | /// let ca_cert = X509::from_pem(ca_cert_bytes).unwrap();
43 | ///
44 | /// let ca = OpensslAuthority::new(
45 | /// private_key,
46 | /// ca_cert,
47 | /// MessageDigest::sha256(),
48 | /// 1_000,
49 | /// aws_lc_rs::default_provider(),
50 | /// );
51 | /// ```
52 | pub struct OpensslAuthority {
53 |     pkey: PKey<Private>,
54 |     private_key: PrivateKeyDer<'static>,
55 |     ca_cert: X509,
56 |     hash: MessageDigest,
57 |     cache: Cache<Authority, Arc<ServerConfig>>,
58 |     provider: Arc<CryptoProvider>,
59 | }
60 |
61 | impl OpensslAuthority {
62 | /// Creates a new openssl authority.
63 | pub fn new(
64 |         pkey: PKey<Private>,
65 | ca_cert: X509,
66 | hash: MessageDigest,
67 | cache_size: u64,
68 | provider: CryptoProvider,
69 | ) -> Self {
70 | let private_key = PrivateKeyDer::from(PrivatePkcs8KeyDer::from(
71 | pkey.private_key_to_pkcs8()
72 | .expect("Failed to encode private key"),
73 | ));
74 |
75 | Self {
76 | pkey,
77 | private_key,
78 | ca_cert,
79 | hash,
80 | cache: Cache::builder()
81 | .max_capacity(cache_size)
82 | .time_to_live(Duration::from_secs(CACHE_TTL))
83 | .build(),
84 | provider: Arc::new(provider),
85 | }
86 | }
87 |
88 |     fn gen_cert(&self, authority: &Authority) -> Result<CertificateDer<'static>, ErrorStack> {
89 | let mut name_builder = X509NameBuilder::new()?;
90 | name_builder.append_entry_by_text("CN", authority.host())?;
91 | let name = name_builder.build();
92 |
93 | let mut x509_builder = X509Builder::new()?;
94 | x509_builder.set_subject_name(&name)?;
95 | x509_builder.set_version(2)?;
96 |
97 | let not_before = SystemTime::now()
98 | .duration_since(SystemTime::UNIX_EPOCH)
99 | .expect("Failed to determine current UNIX time")
100 | .as_secs() as i64
101 | - NOT_BEFORE_OFFSET;
102 | x509_builder.set_not_before(Asn1Time::from_unix(not_before)?.as_ref())?;
103 | x509_builder.set_not_after(Asn1Time::from_unix(not_before + TTL_SECS)?.as_ref())?;
104 |
105 | x509_builder.set_pubkey(&self.pkey)?;
106 | x509_builder.set_issuer_name(self.ca_cert.subject_name())?;
107 |
108 | let alternative_name = SubjectAlternativeName::new()
109 | .dns(authority.host())
110 | .build(&x509_builder.x509v3_context(Some(&self.ca_cert), None))?;
111 | x509_builder.append_extension(alternative_name)?;
112 |
113 | let mut serial_number = [0; 16];
114 | rand::rand_bytes(&mut serial_number)?;
115 |
116 | let serial_number = BigNum::from_slice(&serial_number)?;
117 | let serial_number = Asn1Integer::from_bn(&serial_number)?;
118 | x509_builder.set_serial_number(&serial_number)?;
119 |
120 | x509_builder.sign(&self.pkey, self.hash)?;
121 | let x509 = x509_builder.build();
122 | Ok(CertificateDer::from(x509.to_der()?))
123 | }
124 | }
125 |
126 | impl CertificateAuthority for OpensslAuthority {
127 |     async fn gen_server_config(&self, authority: &Authority) -> Arc<ServerConfig> {
128 | if let Some(server_cfg) = self.cache.get(authority).await {
129 | debug!("Using cached server config");
130 | return server_cfg;
131 | }
132 | debug!("Generating server config");
133 |
134 | let certs = vec![
135 | self.gen_cert(authority)
136 | .unwrap_or_else(|_| panic!("Failed to generate certificate for {}", authority)),
137 | ];
138 |
139 | let mut server_cfg = ServerConfig::builder_with_provider(Arc::clone(&self.provider))
140 | .with_safe_default_protocol_versions()
141 | .expect("Failed to specify protocol versions")
142 | .with_no_client_auth()
143 | .with_single_cert(certs, self.private_key.clone_key())
144 | .expect("Failed to build ServerConfig");
145 |
146 | server_cfg.alpn_protocols = vec![
147 | #[cfg(feature = "http2")]
148 | b"h2".to_vec(),
149 | b"http/1.1".to_vec(),
150 | ];
151 |
152 | let server_cfg = Arc::new(server_cfg);
153 |
154 | self.cache
155 | .insert(authority.clone(), Arc::clone(&server_cfg))
156 | .await;
157 |
158 | server_cfg
159 | }
160 | }
161 |
162 | #[cfg(test)]
163 | mod tests {
164 | use super::*;
165 | use tokio_rustls::rustls::crypto::aws_lc_rs;
166 |
167 | fn build_ca(cache_size: u64) -> OpensslAuthority {
168 | let private_key_bytes: &[u8] = include_bytes!("../../examples/ca/hudsucker.key");
169 | let ca_cert_bytes: &[u8] = include_bytes!("../../examples/ca/hudsucker.cer");
170 | let private_key =
171 | PKey::private_key_from_pem(private_key_bytes).expect("Failed to parse private key");
172 | let ca_cert = X509::from_pem(ca_cert_bytes).expect("Failed to parse CA certificate");
173 |
174 | OpensslAuthority::new(
175 | private_key,
176 | ca_cert,
177 | MessageDigest::sha256(),
178 | cache_size,
179 | aws_lc_rs::default_provider(),
180 | )
181 | }
182 |
183 | #[test]
184 | fn unique_serial_numbers() {
185 | let ca = build_ca(0);
186 |
187 | let authority1 = Authority::from_static("example.com");
188 | let authority2 = Authority::from_static("example2.com");
189 |
190 | let c1 = ca.gen_cert(&authority1).unwrap();
191 | let c2 = ca.gen_cert(&authority2).unwrap();
192 | let c3 = ca.gen_cert(&authority1).unwrap();
193 | let c4 = ca.gen_cert(&authority2).unwrap();
194 |
195 | let (_, cert1) = x509_parser::parse_x509_certificate(&c1).unwrap();
196 | let (_, cert2) = x509_parser::parse_x509_certificate(&c2).unwrap();
197 |
198 | assert_ne!(cert1.raw_serial(), cert2.raw_serial());
199 |
200 | let (_, cert3) = x509_parser::parse_x509_certificate(&c3).unwrap();
201 | let (_, cert4) = x509_parser::parse_x509_certificate(&c4).unwrap();
202 |
203 | assert_ne!(cert3.raw_serial(), cert4.raw_serial());
204 |
205 | assert_ne!(cert1.raw_serial(), cert3.raw_serial());
206 | assert_ne!(cert2.raw_serial(), cert4.raw_serial());
207 | }
208 | }
209 |
--------------------------------------------------------------------------------
/src/certificate_authority/rcgen_authority.rs:
--------------------------------------------------------------------------------
1 | use crate::certificate_authority::{CACHE_TTL, CertificateAuthority, NOT_BEFORE_OFFSET, TTL_SECS};
2 | use http::uri::Authority;
3 | use moka::future::Cache;
4 | use rand::{Rng, rng};
5 | use rcgen::{
6 | Certificate, CertificateParams, DistinguishedName, DnType, Ia5String, KeyPair, SanType,
7 | };
8 | use std::sync::Arc;
9 | use time::{Duration, OffsetDateTime};
10 | use tokio_rustls::rustls::{
11 | ServerConfig,
12 | crypto::CryptoProvider,
13 | pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer},
14 | };
15 | use tracing::debug;
16 |
17 | /// Issues certificates for use when communicating with clients.
18 | ///
19 | /// Issues certificates for communicating with clients over TLS. Certificates are cached in memory
20 | /// up to a max size that is provided when creating the authority. Certificates are generated using
21 | /// the `rcgen` crate.
22 | ///
23 | /// # Examples
24 | ///
25 | /// ```rust
26 | /// use hudsucker::{certificate_authority::RcgenAuthority, rustls::crypto::aws_lc_rs};
27 | /// use rcgen::{CertificateParams, KeyPair};
28 | ///
29 | /// let key_pair = include_str!("../../examples/ca/hudsucker.key");
30 | /// let ca_cert = include_str!("../../examples/ca/hudsucker.cer");
31 | /// let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
32 | /// let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
33 | /// .expect("Failed to parse CA certificate")
34 | /// .self_signed(&key_pair)
35 | /// .expect("Failed to sign CA certificate");
36 | ///
37 | /// let ca = RcgenAuthority::new(key_pair, ca_cert, 1_000, aws_lc_rs::default_provider());
38 | /// ```
39 | pub struct RcgenAuthority {
40 | key_pair: KeyPair,
41 | ca_cert: Certificate,
42 | private_key: PrivateKeyDer<'static>,
43 |     cache: Cache<Authority, Arc<ServerConfig>>,
44 |     provider: Arc<CryptoProvider>,
45 | }
46 |
47 | impl RcgenAuthority {
48 | /// Creates a new rcgen authority.
49 | pub fn new(
50 | key_pair: KeyPair,
51 | ca_cert: Certificate,
52 | cache_size: u64,
53 | provider: CryptoProvider,
54 | ) -> Self {
55 | let private_key = PrivateKeyDer::from(PrivatePkcs8KeyDer::from(key_pair.serialize_der()));
56 |
57 | Self {
58 | key_pair,
59 | ca_cert,
60 | private_key,
61 | cache: Cache::builder()
62 | .max_capacity(cache_size)
63 | .time_to_live(std::time::Duration::from_secs(CACHE_TTL))
64 | .build(),
65 | provider: Arc::new(provider),
66 | }
67 | }
68 |
69 | fn gen_cert(&self, authority: &Authority) -> CertificateDer<'static> {
70 | let mut params = CertificateParams::default();
71 |         params.serial_number = Some(rng().random::<u64>().into());
72 |
73 | let not_before = OffsetDateTime::now_utc() - Duration::seconds(NOT_BEFORE_OFFSET);
74 | params.not_before = not_before;
75 | params.not_after = not_before + Duration::seconds(TTL_SECS);
76 |
77 | let mut distinguished_name = DistinguishedName::new();
78 | distinguished_name.push(DnType::CommonName, authority.host());
79 | params.distinguished_name = distinguished_name;
80 |
81 | params.subject_alt_names.push(SanType::DnsName(
82 | Ia5String::try_from(authority.host()).expect("Failed to create Ia5String"),
83 | ));
84 |
85 | params
86 | .signed_by(&self.key_pair, &self.ca_cert, &self.key_pair)
87 | .expect("Failed to sign certificate")
88 | .into()
89 | }
90 | }
91 |
92 | impl CertificateAuthority for RcgenAuthority {
93 |     async fn gen_server_config(&self, authority: &Authority) -> Arc<ServerConfig> {
94 | if let Some(server_cfg) = self.cache.get(authority).await {
95 | debug!("Using cached server config");
96 | return server_cfg;
97 | }
98 | debug!("Generating server config");
99 |
100 | let certs = vec![self.gen_cert(authority)];
101 |
102 | let mut server_cfg = ServerConfig::builder_with_provider(Arc::clone(&self.provider))
103 | .with_safe_default_protocol_versions()
104 | .expect("Failed to specify protocol versions")
105 | .with_no_client_auth()
106 | .with_single_cert(certs, self.private_key.clone_key())
107 | .expect("Failed to build ServerConfig");
108 |
109 | server_cfg.alpn_protocols = vec![
110 | #[cfg(feature = "http2")]
111 | b"h2".to_vec(),
112 | b"http/1.1".to_vec(),
113 | ];
114 |
115 | let server_cfg = Arc::new(server_cfg);
116 |
117 | self.cache
118 | .insert(authority.clone(), Arc::clone(&server_cfg))
119 | .await;
120 |
121 | server_cfg
122 | }
123 | }
124 |
125 | #[cfg(test)]
126 | mod tests {
127 | use super::*;
128 | use tokio_rustls::rustls::crypto::aws_lc_rs;
129 |
130 | fn build_ca(cache_size: u64) -> RcgenAuthority {
131 | let key_pair = include_str!("../../examples/ca/hudsucker.key");
132 | let ca_cert = include_str!("../../examples/ca/hudsucker.cer");
133 | let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
134 | let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
135 | .expect("Failed to parse CA certificate")
136 | .self_signed(&key_pair)
137 | .expect("Failed to sign CA certificate");
138 |
139 | RcgenAuthority::new(key_pair, ca_cert, cache_size, aws_lc_rs::default_provider())
140 | }
141 |
142 | #[test]
143 | fn unique_serial_numbers() {
144 | let ca = build_ca(0);
145 |
146 | let authority1 = Authority::from_static("example.com");
147 | let authority2 = Authority::from_static("example2.com");
148 |
149 | let c1 = ca.gen_cert(&authority1);
150 | let c2 = ca.gen_cert(&authority2);
151 | let c3 = ca.gen_cert(&authority1);
152 | let c4 = ca.gen_cert(&authority2);
153 |
154 | let (_, cert1) = x509_parser::parse_x509_certificate(&c1).unwrap();
155 | let (_, cert2) = x509_parser::parse_x509_certificate(&c2).unwrap();
156 |
157 | assert_ne!(cert1.raw_serial(), cert2.raw_serial());
158 |
159 | let (_, cert3) = x509_parser::parse_x509_certificate(&c3).unwrap();
160 | let (_, cert4) = x509_parser::parse_x509_certificate(&c4).unwrap();
161 |
162 | assert_ne!(cert3.raw_serial(), cert4.raw_serial());
163 |
164 | assert_ne!(cert1.raw_serial(), cert3.raw_serial());
165 | assert_ne!(cert2.raw_serial(), cert4.raw_serial());
166 | }
167 | }
168 |
--------------------------------------------------------------------------------
/src/decoder.rs:
--------------------------------------------------------------------------------
1 | use crate::{Body, Error};
2 | use async_compression::tokio::bufread::{BrotliDecoder, GzipDecoder, ZlibDecoder, ZstdDecoder};
3 | use bstr::ByteSlice;
4 | use futures::Stream;
5 | use hyper::{
6 | Request, Response,
7 | body::{Body as HttpBody, Bytes},
8 | header::{CONTENT_ENCODING, CONTENT_LENGTH, HeaderMap, HeaderValue},
9 | };
10 | use std::{
11 | io,
12 | pin::Pin,
13 | task::{Context, Poll},
14 | };
15 | use tokio::io::{AsyncBufRead, AsyncRead, BufReader};
16 | use tokio_util::io::{ReaderStream, StreamReader};
17 |
18 | struct IoStream<T>(T);
19 |
20 | impl<T: HttpBody<Data = Bytes, Error = Error> + Unpin> Stream for IoStream<T> {
21 |     type Item = Result<Bytes, io::Error>;
22 |
23 |     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
24 | loop {
25 | return match futures::ready!(Pin::new(&mut self.0).poll_frame(cx)) {
26 | Some(Ok(frame)) => match frame.into_data() {
27 | Ok(buf) => Poll::Ready(Some(Ok(buf))),
28 | Err(_) => continue,
29 | },
30 | Some(Err(Error::Io(err))) => Poll::Ready(Some(Err(err))),
31 | Some(Err(err)) => Poll::Ready(Some(Err(io::Error::other(err)))),
32 | None => Poll::Ready(None),
33 | };
34 | }
35 | }
36 | }
37 |
38 | fn decode(
39 | encoding: &[u8],
40 | reader: impl AsyncBufRead + Send + Sync + Unpin + 'static,
41 | ) -> Result<Box<dyn AsyncRead + Send + Sync + Unpin>, Error> {
42 | Ok(match encoding {
43 | b"gzip" | b"x-gzip" => Box::new(GzipDecoder::new(reader)),
44 | b"deflate" => Box::new(ZlibDecoder::new(reader)),
45 | b"br" => Box::new(BrotliDecoder::new(reader)),
46 | b"zstd" => Box::new(ZstdDecoder::new(reader)),
47 | _ => Err(Error::Decode)?,
48 | })
49 | }
50 |
51 | enum Decoder<T> {
52 |     Body(T),
53 |     Decoder(Box<dyn AsyncRead + Send + Sync + Unpin>),
54 | }
55 |
56 | impl Decoder<Body> {
57 |     pub fn decode(self, encoding: &[u8]) -> Result<Self, Error> {
58 | if encoding == b"identity" {
59 | return Ok(self);
60 | }
61 |
62 | Ok(Self::Decoder(match self {
63 | Self::Body(body) => decode(encoding, StreamReader::new(IoStream(body))),
64 | Self::Decoder(decoder) => decode(encoding, BufReader::new(decoder)),
65 | }?))
66 | }
67 | }
68 |
69 | impl From<Decoder<Body>> for Body {
70 |     fn from(decoder: Decoder<Body>) -> Body {
71 | match decoder {
72 | Decoder::Body(body) => body,
73 | Decoder::Decoder(decoder) => Body::from_stream(ReaderStream::new(decoder)),
74 | }
75 | }
76 | }
77 |
78 | fn extract_encodings(headers: &HeaderMap<HeaderValue>) -> impl Iterator<Item = &[u8]> {
79 | headers
80 | .get_all(CONTENT_ENCODING)
81 | .iter()
82 | .rev()
83 | .flat_map(|val| val.as_bytes().rsplit_str(b",").map(|v| v.trim()))
84 | }
85 |
86 | fn decode_body<'a>(
87 |     encodings: impl IntoIterator<Item = &'a [u8]>,
88 |     body: Body,
89 | ) -> Result<Body, Error> {
90 | let mut decoder = Decoder::Body(body);
91 |
92 | for encoding in encodings {
93 | decoder = decoder.decode(encoding)?;
94 | }
95 |
96 | Ok(decoder.into())
97 | }
98 |
99 | /// Decode the body of a request.
100 | ///
101 | /// # Errors
102 | ///
103 | /// This will return an error if either of the `content-encoding` or `content-length` headers are
104 | /// unable to be parsed, or if one of the values specified in the `content-encoding` header is not
105 | /// supported.
106 | ///
107 | /// # Examples
108 | ///
109 | /// ```rust
110 | /// use hudsucker::{
111 | /// Body, HttpContext, HttpHandler, RequestOrResponse, decode_request, hyper::Request,
112 | /// };
113 | ///
114 | /// #[derive(Clone)]
115 | /// pub struct MyHandler;
116 | ///
117 | /// impl HttpHandler for MyHandler {
118 | /// async fn handle_request(
119 | /// &mut self,
120 | /// _ctx: &HttpContext,
121 | ///         req: Request<Body>,
122 | /// ) -> RequestOrResponse {
123 | /// let req = decode_request(req).unwrap();
124 | ///
125 | /// // Do something with the request
126 | ///
127 | /// RequestOrResponse::Request(req)
128 | /// }
129 | /// }
130 | /// ```
131 | pub fn decode_request(mut req: Request<Body>) -> Result<Request<Body>, Error> {
132 | if !req.headers().contains_key(CONTENT_ENCODING) {
133 | return Ok(req);
134 | }
135 |
136 | if let Some(val) = req.headers_mut().remove(CONTENT_LENGTH) {
137 | if val == "0" {
138 | return Ok(req);
139 | }
140 | }
141 |
142 | let (mut parts, body) = req.into_parts();
143 |
144 | let body = {
145 | let encodings = extract_encodings(&parts.headers);
146 | decode_body(encodings, body)?
147 | };
148 |
149 | parts.headers.remove(CONTENT_ENCODING);
150 |
151 | Ok(Request::from_parts(parts, body))
152 | }
153 |
154 | /// Decode the body of a response.
155 | ///
156 | /// # Errors
157 | ///
158 | /// This will return an error if either of the `content-encoding` or `content-length` headers are
159 | /// unable to be parsed, or if one of the values specified in the `content-encoding` header is not
160 | /// supported.
161 | ///
162 | /// # Examples
163 | ///
164 | /// ```rust
165 | /// use hudsucker::{Body, HttpContext, HttpHandler, decode_response, hyper::Response};
166 | ///
167 | /// #[derive(Clone)]
168 | /// pub struct MyHandler;
169 | ///
170 | /// impl HttpHandler for MyHandler {
171 | /// async fn handle_response(
172 | /// &mut self,
173 | /// _ctx: &HttpContext,
174 | ///         res: Response<Body>,
175 | ///     ) -> Response<Body> {
176 | /// let res = decode_response(res).unwrap();
177 | ///
178 | /// // Do something with the response
179 | ///
180 | /// res
181 | /// }
182 | /// }
183 | /// ```
184 | pub fn decode_response(mut res: Response<Body>) -> Result<Response<Body>, Error> {
185 | if !res.headers().contains_key(CONTENT_ENCODING) {
186 | return Ok(res);
187 | }
188 |
189 | if let Some(val) = res.headers_mut().remove(CONTENT_LENGTH) {
190 | if val == "0" {
191 | return Ok(res);
192 | }
193 | }
194 |
195 | let (mut parts, body) = res.into_parts();
196 |
197 | let body = {
198 | let encodings = extract_encodings(&parts.headers);
199 | decode_body(encodings, body)?
200 | };
201 |
202 | parts.headers.remove(CONTENT_ENCODING);
203 |
204 | Ok(Response::from_parts(parts, body))
205 | }
206 |
207 | #[cfg(test)]
208 | mod tests {
209 | use super::*;
210 |
211 | mod extract_encodings {
212 | use super::*;
213 |
214 | #[test]
215 | fn no_headers() {
216 | let headers = HeaderMap::new();
217 |
218 | assert_eq!(extract_encodings(&headers).count(), 0);
219 | }
220 |
221 | #[test]
222 | fn single_header_single_value() {
223 | let mut headers = HeaderMap::new();
224 | headers.append(CONTENT_ENCODING, HeaderValue::from_static("gzip"));
225 |
226 | assert_eq!(
227 |                 extract_encodings(&headers).collect::<Vec<_>>(),
228 | vec![b"gzip"]
229 | );
230 | }
231 |
232 | #[test]
233 | fn single_header_multiple_values() {
234 | let mut headers = HeaderMap::new();
235 | headers.append(CONTENT_ENCODING, HeaderValue::from_static("gzip, deflate"));
236 |
237 | assert_eq!(
238 |                 extract_encodings(&headers).collect::<Vec<_>>(),
239 | vec![&b"deflate"[..], &b"gzip"[..]]
240 | );
241 | }
242 |
243 | #[test]
244 | fn multiple_headers_single_value() {
245 | let mut headers = HeaderMap::new();
246 | headers.append(CONTENT_ENCODING, HeaderValue::from_static("gzip"));
247 | headers.append(CONTENT_ENCODING, HeaderValue::from_static("deflate"));
248 |
249 | assert_eq!(
250 |                 extract_encodings(&headers).collect::<Vec<_>>(),
251 | vec![&b"deflate"[..], &b"gzip"[..]]
252 | );
253 | }
254 |
255 | #[test]
256 | fn multiple_headers_multiple_values() {
257 | let mut headers = HeaderMap::new();
258 | headers.append(CONTENT_ENCODING, HeaderValue::from_static("gzip, deflate"));
259 | headers.append(CONTENT_ENCODING, HeaderValue::from_static("br, zstd"));
260 |
261 | assert_eq!(
262 |                 extract_encodings(&headers).collect::<Vec<_>>(),
263 | vec![&b"zstd"[..], &b"br"[..], &b"deflate"[..], &b"gzip"[..]]
264 | );
265 | }
266 | }
267 |
268 |     async fn to_bytes<H: HttpBody>(body: H) -> Bytes
269 | where
270 | H::Error: std::fmt::Debug,
271 | {
272 | use http_body_util::BodyExt;
273 | body.collect().await.unwrap().to_bytes()
274 | }
275 |
276 | mod decode_body {
277 | use super::*;
278 | use async_compression::tokio::bufread::{BrotliEncoder, GzipEncoder};
279 |
280 | #[tokio::test]
281 | async fn no_encodings() {
282 | let content = "hello, world";
283 | let body = Body::from(content);
284 |
285 | assert_eq!(
286 | &to_bytes(decode_body(vec![], body).unwrap()).await[..],
287 | content.as_bytes()
288 | );
289 | }
290 |
291 | #[tokio::test]
292 | async fn identity_encoding() {
293 | let content = "hello, world";
294 | let body = Body::from(content);
295 |
296 | assert_eq!(
297 | &to_bytes(decode_body(vec![&b"identity"[..]], body).unwrap()).await[..],
298 | content.as_bytes()
299 | );
300 | }
301 |
302 | #[tokio::test]
303 | async fn single_encoding() {
304 | let content = b"hello, world";
305 | let encoder = GzipEncoder::new(&content[..]);
306 | let body = Body::from_stream(ReaderStream::new(encoder));
307 |
308 | assert_eq!(
309 | &to_bytes(decode_body(vec![&b"gzip"[..]], body).unwrap()).await[..],
310 | content
311 | );
312 | }
313 |
314 | #[tokio::test]
315 | async fn multiple_encodings() {
316 | let content = b"hello, world";
317 | let encoder = GzipEncoder::new(&content[..]);
318 | let encoder = BrotliEncoder::new(BufReader::new(encoder));
319 | let body = Body::from_stream(ReaderStream::new(encoder));
320 |
321 | assert_eq!(
322 | &to_bytes(decode_body(vec![&b"br"[..], &b"gzip"[..]], body).unwrap()).await[..],
323 | content
324 | );
325 | }
326 |
327 | #[test]
328 | fn invalid_encoding() {
329 | let body = Body::empty();
330 |
331 | assert!(decode_body(vec![&b"invalid"[..]], body).is_err());
332 | }
333 | }
334 |
335 | mod decode_request {
336 | use super::*;
337 | use async_compression::tokio::bufread::GzipEncoder;
338 |
339 | #[tokio::test]
340 | async fn decodes_request() {
341 | let content = b"hello, world";
342 | let encoder = GzipEncoder::new(&content[..]);
343 | let req = Request::builder()
344 | .header(CONTENT_LENGTH, 123)
345 | .header(CONTENT_ENCODING, "gzip")
346 | .body(Body::from_stream(ReaderStream::new(encoder)))
347 | .unwrap();
348 |
349 | let req = decode_request(req).unwrap();
350 |
351 | assert!(!req.headers().contains_key(CONTENT_LENGTH));
352 | assert!(!req.headers().contains_key(CONTENT_ENCODING));
353 | assert_eq!(&to_bytes(req.into_body()).await[..], content);
354 | }
355 | }
356 |
357 | mod decode_response {
358 | use super::*;
359 | use async_compression::tokio::bufread::GzipEncoder;
360 |
361 | #[tokio::test]
362 | async fn decodes_response() {
363 | let content = b"hello, world";
364 | let encoder = GzipEncoder::new(&content[..]);
365 | let res = Response::builder()
366 | .header(CONTENT_LENGTH, 123)
367 | .header(CONTENT_ENCODING, "gzip")
368 | .body(Body::from_stream(ReaderStream::new(encoder)))
369 | .unwrap();
370 |
371 | let res = decode_response(res).unwrap();
372 |
373 | assert!(!res.headers().contains_key(CONTENT_LENGTH));
374 | assert!(!res.headers().contains_key(CONTENT_ENCODING));
375 | assert_eq!(&to_bytes(res.into_body()).await[..], content);
376 | }
377 | }
378 | }
379 |
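
The two helpers above only strip the `content-encoding`/`content-length` headers and wrap the body in the right decoders; nothing is buffered until a handler asks for it. A hedged sketch of how a handler might combine `decode_response` with `http_body_util` to buffer and inspect the decoded bytes (the `InspectHandler` name and the direct `http_body_util` dependency are assumptions, not part of this file):

```rust
use http_body_util::{BodyExt, Full};
use hudsucker::{decode_response, hyper::Response, Body, HttpContext, HttpHandler};

#[derive(Clone)]
struct InspectHandler;

impl HttpHandler for InspectHandler {
    async fn handle_response(&mut self, _ctx: &HttpContext, res: Response<Body>) -> Response<Body> {
        // Strip content-encoding/content-length and decompress the body.
        let res = decode_response(res).expect("unsupported content-encoding");
        let (parts, body) = res.into_parts();

        // Buffer the decoded body so it can be inspected.
        let bytes = body.collect().await.expect("failed to read body").to_bytes();
        println!("decoded body is {} bytes", bytes.len());

        // Rebuild the response around the buffered bytes.
        Response::from_parts(parts, Body::from(Full::new(bytes)))
    }
}
```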
--------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
1 | use crate::builder;
2 | use thiserror::Error;
3 |
4 | #[derive(Debug, Error)]
5 | #[non_exhaustive]
6 | pub enum Error {
7 | #[error("network error")]
8 | Network(#[from] hyper::Error),
9 | #[error("io error")]
10 | Io(#[from] std::io::Error),
11 | #[error("unable to decode body")]
12 | Decode,
13 | #[error("builder error")]
14 | Builder(#[from] builder::Error),
15 | #[error("unknown error")]
16 | Unknown,
17 | }
18 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![cfg_attr(docsrs, feature(doc_auto_cfg))]
2 |
3 | //! Hudsucker is a MITM HTTP/S proxy that allows you to:
4 | //!
5 | //! - Modify HTTP/S requests
6 | //! - Modify HTTP/S responses
7 | //! - Modify WebSocket messages
8 | //!
9 | //! ## Features
10 | //!
11 | //! - `decoder`: Enables [`decode_request`] and [`decode_response`] helpers (enabled by default).
12 | //! - `full`: Enables all features.
13 | //! - `http2`: Enables HTTP/2 support.
14 | //! - `native-tls-client`: Enables [`ProxyBuilder::with_native_tls_client`](builder::ProxyBuilder::with_native_tls_client).
15 | //! - `openssl-ca`: Enables [`OpensslAuthority`](certificate_authority::OpensslAuthority).
16 | //! - `rcgen-ca`: Enables [`RcgenAuthority`](certificate_authority::RcgenAuthority) (enabled by default).
17 | //! - `rustls-client`: Enables [`ProxyBuilder::with_rustls_client`](builder::ProxyBuilder::with_rustls_client) (enabled by default).
18 |
19 | mod body;
20 | #[cfg(feature = "decoder")]
21 | mod decoder;
22 | mod error;
23 | mod noop;
24 | mod proxy;
25 | mod rewind;
26 |
27 | pub mod certificate_authority;
28 |
29 | use futures::{Sink, SinkExt, Stream, StreamExt};
30 | use hyper::{Request, Response, StatusCode, Uri};
31 | use std::net::SocketAddr;
32 | use tokio_tungstenite::tungstenite::{self, Message};
33 | use tracing::error;
34 |
35 | pub use futures;
36 | pub use hyper;
37 | pub use hyper_util;
38 | #[cfg(feature = "openssl-ca")]
39 | pub use openssl;
40 | #[cfg(feature = "rcgen-ca")]
41 | pub use rcgen;
42 | pub use tokio_rustls::rustls;
43 | pub use tokio_tungstenite;
44 |
45 | pub use body::Body;
46 | #[cfg(feature = "decoder")]
47 | pub use decoder::{decode_request, decode_response};
48 | pub use error::Error;
49 | pub use noop::*;
50 | pub use proxy::*;
51 |
52 | /// Enum representing either an HTTP request or response.
53 | #[derive(Debug)]
54 | pub enum RequestOrResponse {
55 | /// HTTP Request
56 |     Request(Request<Body>),
57 |     /// HTTP Response
58 |     Response(Response<Body>),
59 | }
60 |
61 | impl From<Request<Body>> for RequestOrResponse {
62 |     fn from(req: Request<Body>) -> Self {
63 | Self::Request(req)
64 | }
65 | }
66 |
67 | impl From<Response<Body>> for RequestOrResponse {
68 |     fn from(res: Response<Body>) -> Self {
69 | Self::Response(res)
70 | }
71 | }
72 |
73 | /// Context for HTTP requests and responses.
74 | #[derive(Clone, Debug, Eq, Hash, PartialEq)]
75 | #[non_exhaustive]
76 | pub struct HttpContext {
77 | /// Address of the client that is sending the request.
78 | pub client_addr: SocketAddr,
79 | }
80 |
81 | /// Context for websocket messages.
82 | #[derive(Clone, Debug, Eq, Hash, PartialEq)]
83 | pub enum WebSocketContext {
84 | #[non_exhaustive]
85 | ClientToServer {
86 | /// Address of the client.
87 | src: SocketAddr,
88 | /// URI of the server.
89 | dst: Uri,
90 | },
91 | #[non_exhaustive]
92 | ServerToClient {
93 | /// URI of the server.
94 | src: Uri,
95 | /// Address of the client.
96 | dst: SocketAddr,
97 | },
98 | }
99 |
100 | /// Handler for HTTP requests and responses.
101 | ///
102 | /// Each request/response pair is passed to the same instance of the handler.
103 | pub trait HttpHandler: Clone + Send + Sync + 'static {
104 | /// This handler will be called for each HTTP request. It can either return a modified request,
105 | /// or a response. If a request is returned, it will be sent to the upstream server. If a
106 | /// response is returned, it will be sent to the client.
107 | fn handle_request(
108 | &mut self,
109 | _ctx: &HttpContext,
110 |         req: Request<Body>,
111 |     ) -> impl Future<Output = RequestOrResponse> + Send {
112 | async { req.into() }
113 | }
114 |
115 | /// This handler will be called for each HTTP response. It can modify a response before it is
116 | /// forwarded to the client.
117 | fn handle_response(
118 | &mut self,
119 | _ctx: &HttpContext,
120 |         res: Response<Body>,
121 |     ) -> impl Future<Output = Response<Body>> + Send {
122 | async { res }
123 | }
124 |
125 | /// This handler will be called if a proxy request fails. Default response is a 502 Bad Gateway.
126 | fn handle_error(
127 | &mut self,
128 | _ctx: &HttpContext,
129 | err: hyper_util::client::legacy::Error,
130 |     ) -> impl Future<Output = Response<Body>> + Send {
131 | async move {
132 | error!("Failed to forward request: {}", err);
133 | Response::builder()
134 | .status(StatusCode::BAD_GATEWAY)
135 | .body(Body::empty())
136 | .expect("Failed to build response")
137 | }
138 | }
139 |
140 | /// Whether a CONNECT request should be intercepted. Defaults to `true` for all requests.
141 | fn should_intercept(
142 | &mut self,
143 | _ctx: &HttpContext,
144 |         _req: &Request<Body>,
145 |     ) -> impl Future<Output = bool> + Send {
146 | async { true }
147 | }
148 | }
149 |
150 | /// Handler for WebSocket messages.
151 | ///
152 | /// Messages sent over the same WebSocket Stream are passed to the same instance of the handler.
153 | pub trait WebSocketHandler: Clone + Send + Sync + 'static {
154 | /// This handler is responsible for forwarding WebSocket messages from a Stream to a Sink and
155 | /// recovering from any potential errors.
156 | fn handle_websocket(
157 | mut self,
158 | ctx: WebSocketContext,
159 |         mut stream: impl Stream<Item = Result<Message, tungstenite::Error>> + Unpin + Send + 'static,
160 |         mut sink: impl Sink<Message, Error = tungstenite::Error> + Unpin + Send + 'static,
161 |     ) -> impl Future<Output = ()> + Send {
162 | async move {
163 | while let Some(message) = stream.next().await {
164 | match message {
165 | Ok(message) => {
166 | let Some(message) = self.handle_message(&ctx, message).await else {
167 | continue;
168 | };
169 |
170 | match sink.send(message).await {
171 | Err(tungstenite::Error::ConnectionClosed) => (),
172 | Err(e) => error!("WebSocket send error: {}", e),
173 | _ => (),
174 | }
175 | }
176 | Err(e) => {
177 | error!("WebSocket message error: {}", e);
178 |
179 | match sink.send(Message::Close(None)).await {
180 | Err(tungstenite::Error::ConnectionClosed) => (),
181 | Err(e) => error!("WebSocket close error: {}", e),
182 | _ => (),
183 | };
184 |
185 | break;
186 | }
187 | }
188 | }
189 | }
190 | }
191 |
192 | /// This handler will be called for each WebSocket message. It can return an optional modified
193 | /// message. If None is returned the message will not be forwarded.
194 | fn handle_message(
195 | &mut self,
196 | _ctx: &WebSocketContext,
197 | message: Message,
198 |     ) -> impl Future<Output = Option<Message>> + Send {
199 | async { Some(message) }
200 | }
201 | }
202 |
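
`handle_message` is the hook most WebSocket handlers implement; returning `None` suppresses a frame entirely, as the trait docs above note. A minimal sketch (the `ShoutHandler` name is illustrative only):

```rust
use hudsucker::{tokio_tungstenite::tungstenite::Message, WebSocketContext, WebSocketHandler};

#[derive(Clone)]
struct ShoutHandler;

impl WebSocketHandler for ShoutHandler {
    async fn handle_message(&mut self, _ctx: &WebSocketContext, message: Message) -> Option<Message> {
        match message {
            // Rewrite text frames before they are forwarded.
            Message::Text(text) => Some(Message::text(text.to_uppercase())),
            // Returning None drops the frame instead of forwarding it.
            Message::Binary(_) => None,
            other => Some(other),
        }
    }
}
```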
--------------------------------------------------------------------------------
/src/noop.rs:
--------------------------------------------------------------------------------
1 | use crate::{HttpHandler, WebSocketHandler};
2 |
3 | /// A No-op handler.
4 | ///
5 | /// When using this handler, HTTP requests and responses and WebSocket messages will not be
6 | /// modified.
7 | #[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
8 | pub struct NoopHandler(());
9 |
10 | impl NoopHandler {
11 | pub(crate) fn new() -> Self {
12 | NoopHandler(())
13 | }
14 | }
15 |
16 | impl HttpHandler for NoopHandler {}
17 | impl WebSocketHandler for NoopHandler {}
18 |
--------------------------------------------------------------------------------
/src/proxy/builder.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 | Body, HttpHandler, NoopHandler, Proxy, WebSocketHandler,
3 | certificate_authority::CertificateAuthority,
4 | };
5 | use hyper_util::{
6 | client::legacy::{Client, connect::Connect},
7 | rt::TokioExecutor,
8 | server::conn::auto::Builder,
9 | };
10 | use std::{
11 | future::{Pending, pending},
12 | net::SocketAddr,
13 | sync::Arc,
14 | };
15 | use thiserror::Error;
16 | use tokio::net::TcpListener;
17 | use tokio_rustls::rustls::{ClientConfig, crypto::CryptoProvider};
18 | use tokio_tungstenite::Connector;
19 |
20 | #[derive(Debug, Error)]
21 | #[non_exhaustive]
22 | pub enum Error {
23 | #[cfg(feature = "native-tls-client")]
24 | #[error("{0}")]
25 | NativeTls(#[from] hyper_tls::native_tls::Error),
26 | #[cfg(feature = "rustls-client")]
27 | #[error("{0}")]
28 | Rustls(#[from] tokio_rustls::rustls::Error),
29 | }
30 |
31 | /// A builder for creating a [`Proxy`].
32 | ///
33 | /// # Examples
34 | ///
35 | /// ```rust
36 | /// # #[cfg(all(feature = "rcgen-ca", feature = "rustls-client"))]
37 | /// # {
38 | /// use hudsucker::Proxy;
39 | /// # use hudsucker::{
40 | /// # certificate_authority::RcgenAuthority,
41 | /// # rcgen::{CertificateParams, KeyPair},
42 | /// # rustls::crypto::aws_lc_rs,
43 | /// # };
44 | /// #
45 | /// # let key_pair = include_str!("../../examples/ca/hudsucker.key");
46 | /// # let ca_cert = include_str!("../../examples/ca/hudsucker.cer");
47 | /// # let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
48 | /// # let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
49 | /// # .expect("Failed to parse CA certificate")
50 | /// # .self_signed(&key_pair)
51 | /// # .expect("Failed to sign CA certificate");
52 | /// #
53 | /// # let ca = RcgenAuthority::new(key_pair, ca_cert, 1_000, aws_lc_rs::default_provider());
54 | ///
55 | /// // let ca = ...;
56 | ///
57 | /// let proxy = Proxy::builder()
58 | /// .with_addr(std::net::SocketAddr::from(([127, 0, 0, 1], 0)))
59 | /// .with_ca(ca)
60 | /// .with_rustls_client(aws_lc_rs::default_provider())
61 | /// .build()
62 | /// .expect("Failed to create proxy");
63 | /// # }
64 | /// ```
65 | #[derive(Clone, Debug, PartialEq, Eq, Hash)]
66 | pub struct ProxyBuilder<T>(T);
67 |
68 | /// Builder state that needs either an address or a TCP listener.
69 | #[derive(Clone, Debug, PartialEq, Eq, Hash)]
70 | pub struct WantsAddr(());
71 |
72 | #[derive(Debug)]
73 | pub(crate) enum AddrOrListener {
74 | Addr(SocketAddr),
75 | Listener(TcpListener),
76 | }
77 |
78 | impl ProxyBuilder<WantsAddr> {
79 | /// Create a new [`ProxyBuilder`].
80 | pub fn new() -> Self {
81 | Self::default()
82 | }
83 |
84 | /// Set the address to listen on.
85 |     pub fn with_addr(self, addr: SocketAddr) -> ProxyBuilder<WantsCa> {
86 | ProxyBuilder(WantsCa {
87 | al: AddrOrListener::Addr(addr),
88 | })
89 | }
90 |
91 | /// Set a listener to use for the proxy server.
92 |     pub fn with_listener(self, listener: TcpListener) -> ProxyBuilder<WantsCa> {
93 | ProxyBuilder(WantsCa {
94 | al: AddrOrListener::Listener(listener),
95 | })
96 | }
97 | }
98 |
99 | impl Default for ProxyBuilder<WantsAddr> {
100 | fn default() -> Self {
101 | ProxyBuilder(WantsAddr(()))
102 | }
103 | }
104 |
105 | /// Builder state that needs a certificate authority.
106 | #[derive(Debug)]
107 | pub struct WantsCa {
108 | al: AddrOrListener,
109 | }
110 |
111 | impl ProxyBuilder<WantsCa> {
112 | /// Set the certificate authority to use.
113 |     pub fn with_ca<CA: CertificateAuthority>(self, ca: CA) -> ProxyBuilder<WantsClient<CA>> {
114 | ProxyBuilder(WantsClient { al: self.0.al, ca })
115 | }
116 | }
117 |
118 | /// Builder state that needs a client.
119 | #[derive(Debug)]
120 | pub struct WantsClient<CA> {
121 | al: AddrOrListener,
122 | ca: CA,
123 | }
124 |
125 | impl<CA> ProxyBuilder<WantsClient<CA>> {
126 | /// Use a hyper-rustls connector.
127 | #[cfg(feature = "rustls-client")]
128 | pub fn with_rustls_client(
129 | self,
130 | provider: CryptoProvider,
131 |     ) -> ProxyBuilder<WantsHandlers<hyper_rustls::HttpsConnector<hyper_util::client::legacy::connect::HttpConnector>, CA, NoopHandler, NoopHandler, Pending<()>>>
132 | {
133 | use hyper_rustls::ConfigBuilderExt;
134 |
135 | let rustls_config = match ClientConfig::builder_with_provider(Arc::new(provider))
136 | .with_safe_default_protocol_versions()
137 | {
138 | Ok(config) => config.with_webpki_roots().with_no_client_auth(),
139 | Err(e) => {
140 | return ProxyBuilder(WantsHandlers {
141 | al: self.0.al,
142 | ca: self.0.ca,
143 | client: Err(Error::from(e)),
144 | http_handler: NoopHandler::new(),
145 | websocket_handler: NoopHandler::new(),
146 | websocket_connector: None,
147 | server: None,
148 | graceful_shutdown: pending(),
149 | });
150 | }
151 | };
152 |
153 | let https = hyper_rustls::HttpsConnectorBuilder::new()
154 | .with_tls_config(rustls_config.clone())
155 | .https_or_http()
156 | .enable_http1();
157 |
158 | #[cfg(feature = "http2")]
159 | let https = https.enable_http2();
160 |
161 | let https = https.build();
162 |
163 | ProxyBuilder(WantsHandlers {
164 | al: self.0.al,
165 | ca: self.0.ca,
166 | client: Ok(Client::builder(TokioExecutor::new())
167 | .http1_title_case_headers(true)
168 | .http1_preserve_header_case(true)
169 | .build(https)),
170 | http_handler: NoopHandler::new(),
171 | websocket_handler: NoopHandler::new(),
172 | websocket_connector: Some(Connector::Rustls(Arc::new(rustls_config))),
173 | server: None,
174 | graceful_shutdown: pending(),
175 | })
176 | }
177 |
178 | /// Use a hyper-tls connector.
179 | #[cfg(feature = "native-tls-client")]
180 | pub fn with_native_tls_client(
181 | self,
182 |     ) -> ProxyBuilder<WantsHandlers<hyper_tls::HttpsConnector<hyper_util::client::legacy::connect::HttpConnector>, CA, NoopHandler, NoopHandler, Pending<()>>>
183 | {
184 | use hyper_util::client::legacy::connect::HttpConnector;
185 |
186 | let tls_connector = match hyper_tls::native_tls::TlsConnector::new() {
187 | Ok(tls_connector) => tls_connector,
188 | Err(e) => {
189 | return ProxyBuilder(WantsHandlers {
190 | al: self.0.al,
191 | ca: self.0.ca,
192 | client: Err(Error::from(e)),
193 | http_handler: NoopHandler::new(),
194 | websocket_handler: NoopHandler::new(),
195 | websocket_connector: None,
196 | server: None,
197 | graceful_shutdown: pending(),
198 | });
199 | }
200 | };
201 |
202 | let tokio_tls_connector = tokio_native_tls::TlsConnector::from(tls_connector.clone());
203 | let https = hyper_tls::HttpsConnector::from((HttpConnector::new(), tokio_tls_connector));
204 |
205 | ProxyBuilder(WantsHandlers {
206 | al: self.0.al,
207 | ca: self.0.ca,
208 | client: Ok(Client::builder(TokioExecutor::new())
209 | .http1_title_case_headers(true)
210 | .http1_preserve_header_case(true)
211 | .build(https)),
212 | http_handler: NoopHandler::new(),
213 | websocket_handler: NoopHandler::new(),
214 | websocket_connector: Some(Connector::NativeTls(tls_connector)),
215 | server: None,
216 | graceful_shutdown: pending(),
217 | })
218 | }
219 |
220 | /// Use a custom client.
221 |     pub fn with_client<C>(
222 |         self,
223 |         client: Client<C, Body>,
224 |     ) -> ProxyBuilder<WantsHandlers<C, CA, NoopHandler, NoopHandler, Pending<()>>>
225 | where
226 | C: Connect + Clone + Send + Sync + 'static,
227 | {
228 | ProxyBuilder(WantsHandlers {
229 | al: self.0.al,
230 | ca: self.0.ca,
231 | client: Ok(client),
232 | http_handler: NoopHandler::new(),
233 | websocket_handler: NoopHandler::new(),
234 | websocket_connector: None,
235 | server: None,
236 | graceful_shutdown: pending(),
237 | })
238 | }
239 | }
240 |
241 | /// Builder state that can take additional handlers.
242 | pub struct WantsHandlers<C, CA, H, W, F> {
243 |     al: AddrOrListener,
244 |     ca: CA,
245 |     client: Result<Client<C, Body>, Error>,
246 |     http_handler: H,
247 |     websocket_handler: W,
248 |     websocket_connector: Option<Connector>,
249 |     server: Option<Builder<TokioExecutor>>,
250 | graceful_shutdown: F,
251 | }
252 |
253 | impl<C, CA, H, W, F> ProxyBuilder<WantsHandlers<C, CA, H, W, F>> {
254 | /// Set the HTTP handler.
255 |     pub fn with_http_handler<H2: HttpHandler>(
256 |         self,
257 |         http_handler: H2,
258 |     ) -> ProxyBuilder<WantsHandlers<C, CA, H2, W, F>> {
259 | ProxyBuilder(WantsHandlers {
260 | al: self.0.al,
261 | ca: self.0.ca,
262 | client: self.0.client,
263 | http_handler,
264 | websocket_handler: self.0.websocket_handler,
265 | websocket_connector: self.0.websocket_connector,
266 | server: self.0.server,
267 | graceful_shutdown: self.0.graceful_shutdown,
268 | })
269 | }
270 |
271 | /// Set the WebSocket handler.
272 |     pub fn with_websocket_handler<W2: WebSocketHandler>(
273 |         self,
274 |         websocket_handler: W2,
275 |     ) -> ProxyBuilder<WantsHandlers<C, CA, H, W2, F>> {
276 | ProxyBuilder(WantsHandlers {
277 | al: self.0.al,
278 | ca: self.0.ca,
279 | client: self.0.client,
280 | http_handler: self.0.http_handler,
281 | websocket_handler,
282 | websocket_connector: self.0.websocket_connector,
283 | server: self.0.server,
284 | graceful_shutdown: self.0.graceful_shutdown,
285 | })
286 | }
287 |
288 | /// Set the connector to use when connecting to WebSocket servers.
289 | pub fn with_websocket_connector(self, connector: Connector) -> Self {
290 | ProxyBuilder(WantsHandlers {
291 | websocket_connector: Some(connector),
292 | ..self.0
293 | })
294 | }
295 |
296 | /// Set a custom server builder to use for the proxy server.
297 |     pub fn with_server(self, server: Builder<TokioExecutor>) -> Self {
298 | ProxyBuilder(WantsHandlers {
299 | server: Some(server),
300 | ..self.0
301 | })
302 | }
303 |
304 | /// Set a future that when ready will gracefully shutdown the proxy server.
305 |     pub fn with_graceful_shutdown<F2: Future<Output = ()> + Send + 'static>(
306 |         self,
307 |         graceful_shutdown: F2,
308 |     ) -> ProxyBuilder<WantsHandlers<C, CA, H, W, F2>> {
309 | ProxyBuilder(WantsHandlers {
310 | al: self.0.al,
311 | ca: self.0.ca,
312 | client: self.0.client,
313 | http_handler: self.0.http_handler,
314 | websocket_handler: self.0.websocket_handler,
315 | websocket_connector: self.0.websocket_connector,
316 | server: self.0.server,
317 | graceful_shutdown,
318 | })
319 | }
320 |
321 | /// Build the proxy.
322 |     pub fn build(self) -> Result<Proxy<C, CA, H, W, F>, crate::Error> {
323 | Ok(Proxy {
324 | al: self.0.al,
325 | ca: Arc::new(self.0.ca),
326 | client: self.0.client?,
327 | http_handler: self.0.http_handler,
328 | websocket_handler: self.0.websocket_handler,
329 | websocket_connector: self.0.websocket_connector,
330 | server: self.0.server,
331 | graceful_shutdown: self.0.graceful_shutdown,
332 | })
333 | }
334 | }
335 |
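
`with_client` is the escape hatch when neither bundled connector fits. A rough sketch, assuming the proxy only needs to reach plain-HTTP upstreams and mirroring the connector used in this crate's own tests (the `plain_http_client` helper is illustrative, not part of the builder):

```rust
use hudsucker::{
    hyper_util::{
        client::legacy::{connect::HttpConnector, Client},
        rt::TokioExecutor,
    },
    Body,
};

// A client without TLS support; upstream https:// requests will fail with it.
fn plain_http_client() -> Client<HttpConnector, Body> {
    Client::builder(TokioExecutor::new()).build(HttpConnector::new())
}

// Usage, assuming `ca` was constructed as in the example above:
//
// let proxy = Proxy::builder()
//     .with_addr(std::net::SocketAddr::from(([127, 0, 0, 1], 0)))
//     .with_ca(ca)
//     .with_client(plain_http_client())
//     .build()
//     .expect("Failed to create proxy");
```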
--------------------------------------------------------------------------------
/src/proxy/internal.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 | HttpContext, HttpHandler, RequestOrResponse, WebSocketContext, WebSocketHandler, body::Body,
3 | certificate_authority::CertificateAuthority, rewind::Rewind,
4 | };
5 | use futures::{Sink, Stream, StreamExt};
6 | use http::uri::{Authority, Scheme};
7 | use hyper::{
8 | Method, Request, Response, StatusCode, Uri,
9 | body::{Bytes, Incoming},
10 | header::Entry,
11 | service::service_fn,
12 | upgrade::Upgraded,
13 | };
14 | use hyper_util::{
15 | client::legacy::{Client, connect::Connect},
16 | rt::{TokioExecutor, TokioIo},
17 | server,
18 | };
19 | use std::{convert::Infallible, net::SocketAddr, sync::Arc};
20 | use tokio::{io::AsyncReadExt, net::TcpStream, task::JoinHandle};
21 | use tokio_rustls::TlsAcceptor;
22 | use tokio_tungstenite::{
23 | Connector, WebSocketStream,
24 | tungstenite::{self, Message},
25 | };
26 | use tracing::{Instrument, Span, error, info_span, instrument, warn};
27 |
28 | fn bad_request() -> Response<Body> {
29 | Response::builder()
30 | .status(StatusCode::BAD_REQUEST)
31 | .body(Body::empty())
32 | .expect("Failed to build response")
33 | }
34 |
35 | fn spawn_with_trace<T: Send + 'static>(
36 |     fut: impl Future<Output = T> + Send + 'static,
37 |     span: Span,
38 | ) -> JoinHandle<T> {
39 | tokio::spawn(fut.instrument(span))
40 | }
41 |
42 | pub(crate) struct InternalProxy<C, CA, H, W> {
43 |     pub ca: Arc<CA>,
44 |     pub client: Client<C, Body>,
45 |     pub server: server::conn::auto::Builder<TokioExecutor>,
46 |     pub http_handler: H,
47 |     pub websocket_handler: W,
48 |     pub websocket_connector: Option<Connector>,
49 | pub client_addr: SocketAddr,
50 | }
51 |
52 | impl<C, CA, H, W> Clone for InternalProxy<C, CA, H, W>
53 | where
54 | C: Clone,
55 | H: Clone,
56 | W: Clone,
57 | {
58 | fn clone(&self) -> Self {
59 | InternalProxy {
60 | ca: Arc::clone(&self.ca),
61 | client: self.client.clone(),
62 | server: self.server.clone(),
63 | http_handler: self.http_handler.clone(),
64 | websocket_handler: self.websocket_handler.clone(),
65 | websocket_connector: self.websocket_connector.clone(),
66 | client_addr: self.client_addr,
67 | }
68 | }
69 | }
70 |
71 | impl<C, CA, H, W> InternalProxy<C, CA, H, W>
72 | where
73 | C: Connect + Clone + Send + Sync + 'static,
74 | CA: CertificateAuthority,
75 | H: HttpHandler,
76 | W: WebSocketHandler,
77 | {
78 | fn context(&self) -> HttpContext {
79 | HttpContext {
80 | client_addr: self.client_addr,
81 | }
82 | }
83 |
84 | #[instrument(
85 | skip_all,
86 | fields(
87 | version = ?req.version(),
88 | method = %req.method(),
89 | uri=%req.uri(),
90 | client_addr = %self.client_addr,
91 | )
92 | )]
93 | pub(crate) async fn proxy(
94 | mut self,
95 |         req: Request<Incoming>,
96 |     ) -> Result<Response<Body>, Infallible> {
97 | let ctx = self.context();
98 |
99 | let req = match self
100 | .http_handler
101 | .handle_request(&ctx, req.map(Body::from))
102 | .instrument(info_span!("handle_request"))
103 | .await
104 | {
105 | RequestOrResponse::Request(req) => req,
106 | RequestOrResponse::Response(res) => return Ok(res),
107 | };
108 |
109 | if req.method() == Method::CONNECT {
110 | Ok(self.process_connect(req))
111 | } else if hyper_tungstenite::is_upgrade_request(&req) {
112 | Ok(self.upgrade_websocket(req))
113 | } else {
114 | let res = self
115 | .client
116 | .request(normalize_request(req))
117 | .instrument(info_span!("proxy_request"))
118 | .await;
119 |
120 | match res {
121 | Ok(res) => Ok(self
122 | .http_handler
123 | .handle_response(&ctx, res.map(Body::from))
124 | .instrument(info_span!("handle_response"))
125 | .await),
126 | Err(err) => Ok(self
127 | .http_handler
128 | .handle_error(&ctx, err)
129 | .instrument(info_span!("handle_error"))
130 | .await),
131 | }
132 | }
133 | }
134 |
135 |     fn process_connect(mut self, mut req: Request<Body>) -> Response<Body> {
136 | match req.uri().authority().cloned() {
137 | Some(authority) => {
138 | let span = info_span!("process_connect");
139 | let fut = async move {
140 | match hyper::upgrade::on(&mut req).await {
141 | Ok(upgraded) => {
142 | let mut upgraded = TokioIo::new(upgraded);
143 | let mut buffer = [0; 4];
144 | let bytes_read = match upgraded.read(&mut buffer).await {
145 | Ok(bytes_read) => bytes_read,
146 | Err(e) => {
147 | error!("Failed to read from upgraded connection: {}", e);
148 | return;
149 | }
150 | };
151 |
152 | let mut upgraded = Rewind::new(
153 | upgraded,
154 | Bytes::copy_from_slice(buffer[..bytes_read].as_ref()),
155 | );
156 |
157 | if self
158 | .http_handler
159 | .should_intercept(&self.context(), &req)
160 | .await
161 | {
162 | if buffer == *b"GET " {
163 | if let Err(e) = self
164 | .serve_stream(
165 | TokioIo::new(upgraded),
166 | Scheme::HTTP,
167 | authority,
168 | )
169 | .await
170 | {
171 | error!("WebSocket connect error: {}", e);
172 | }
173 |
174 | return;
175 | } else if buffer[..2] == *b"\x16\x03" {
176 | let server_config = self
177 | .ca
178 | .gen_server_config(&authority)
179 | .instrument(info_span!("gen_server_config"))
180 | .await;
181 |
182 | let stream = match TlsAcceptor::from(server_config)
183 | .accept(upgraded)
184 | .await
185 | {
186 | Ok(stream) => TokioIo::new(stream),
187 | Err(e) => {
188 | error!("Failed to establish TLS connection: {}", e);
189 | return;
190 | }
191 | };
192 |
193 | if let Err(e) =
194 | self.serve_stream(stream, Scheme::HTTPS, authority).await
195 | {
196 | if !e
197 | .to_string()
198 | .starts_with("error shutting down connection")
199 | {
200 | error!("HTTPS connect error: {}", e);
201 | }
202 | }
203 |
204 | return;
205 | } else {
206 | warn!(
207 | "Unknown protocol, read '{:02X?}' from upgraded connection",
208 | &buffer[..bytes_read]
209 | );
210 | }
211 | }
212 |
213 | let mut server = match TcpStream::connect(authority.as_ref()).await {
214 | Ok(server) => server,
215 | Err(e) => {
216 | error!("Failed to connect to {}: {}", authority, e);
217 | return;
218 | }
219 | };
220 |
221 | if let Err(e) =
222 | tokio::io::copy_bidirectional(&mut upgraded, &mut server).await
223 | {
224 | error!("Failed to tunnel to {}: {}", authority, e);
225 | }
226 | }
227 | Err(e) => error!("Upgrade error: {}", e),
228 | };
229 | };
230 |
231 | spawn_with_trace(fut, span);
232 | Response::new(Body::empty())
233 | }
234 | None => bad_request(),
235 | }
236 | }
237 |
238 | #[instrument(skip_all)]
239 |     fn upgrade_websocket(self, req: Request<Body>) -> Response<Body> {
240 | let mut req = {
241 | let (mut parts, _) = req.into_parts();
242 |
243 | parts.uri = {
244 | let mut parts = parts.uri.into_parts();
245 |
246 | parts.scheme = if parts.scheme.unwrap_or(Scheme::HTTP) == Scheme::HTTP {
247 | Some("ws".try_into().expect("Failed to convert scheme"))
248 | } else {
249 | Some("wss".try_into().expect("Failed to convert scheme"))
250 | };
251 |
252 | match Uri::from_parts(parts) {
253 | Ok(uri) => uri,
254 | Err(_) => {
255 | return bad_request();
256 | }
257 | }
258 | };
259 |
260 | Request::from_parts(parts, ())
261 | };
262 |
263 | match hyper_tungstenite::upgrade(&mut req, None) {
264 | Ok((res, websocket)) => {
265 | let span = info_span!("websocket");
266 | let fut = async move {
267 | match websocket.await {
268 | Ok(ws) => {
269 | if let Err(e) = self.handle_websocket(ws, req).await {
270 | error!("Failed to handle WebSocket: {}", e);
271 | }
272 | }
273 | Err(e) => {
274 | error!("Failed to upgrade to WebSocket: {}", e);
275 | }
276 | }
277 | };
278 |
279 | spawn_with_trace(fut, span);
280 | res.map(Body::from)
281 | }
282 | Err(_) => bad_request(),
283 | }
284 | }
285 |
286 | #[instrument(skip_all)]
287 | async fn handle_websocket(
288 | self,
289 |         client_socket: WebSocketStream<TokioIo<Upgraded>>,
290 | req: Request<()>,
291 | ) -> Result<(), tungstenite::Error> {
292 | let uri = req.uri().clone();
293 |
294 | #[cfg(any(feature = "rustls-client", feature = "native-tls-client"))]
295 | let (server_socket, _) = tokio_tungstenite::connect_async_tls_with_config(
296 | req,
297 | None,
298 | false,
299 | self.websocket_connector,
300 | )
301 | .await?;
302 |
303 | #[cfg(not(any(feature = "rustls-client", feature = "native-tls-client")))]
304 | let (server_socket, _) = tokio_tungstenite::connect_async(req).await?;
305 |
306 | let (server_sink, server_stream) = server_socket.split();
307 | let (client_sink, client_stream) = client_socket.split();
308 |
309 | let InternalProxy {
310 | websocket_handler, ..
311 | } = self;
312 |
313 | spawn_message_forwarder(
314 | server_stream,
315 | client_sink,
316 | websocket_handler.clone(),
317 | WebSocketContext::ServerToClient {
318 | src: uri.clone(),
319 | dst: self.client_addr,
320 | },
321 | );
322 |
323 | spawn_message_forwarder(
324 | client_stream,
325 | server_sink,
326 | websocket_handler,
327 | WebSocketContext::ClientToServer {
328 | src: self.client_addr,
329 | dst: uri,
330 | },
331 | );
332 |
333 | Ok(())
334 | }
335 |
336 | #[instrument(skip_all)]
337 |     async fn serve_stream<I>(
338 | self,
339 | stream: I,
340 | scheme: Scheme,
341 | authority: Authority,
342 |     ) -> Result<(), Box<dyn std::error::Error + Send + Sync>>
343 | where
344 | I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
345 | {
346 | let service = service_fn(|mut req| {
347 | if req.version() == hyper::Version::HTTP_10 || req.version() == hyper::Version::HTTP_11
348 | {
349 | let (mut parts, body) = req.into_parts();
350 |
351 | parts.uri = {
352 | let mut parts = parts.uri.into_parts();
353 | parts.scheme = Some(scheme.clone());
354 | parts.authority = Some(authority.clone());
355 | Uri::from_parts(parts).expect("Failed to build URI")
356 | };
357 |
358 | req = Request::from_parts(parts, body);
359 | };
360 |
361 | self.clone().proxy(req)
362 | });
363 |
364 | self.server
365 | .serve_connection_with_upgrades(stream, service)
366 | .await
367 | }
368 | }
369 |
370 | fn spawn_message_forwarder(
371 |     stream: impl Stream<Item = Result<Message, tungstenite::Error>> + Unpin + Send + 'static,
372 |     sink: impl Sink<Message, Error = tungstenite::Error> + Unpin + Send + 'static,
373 | handler: impl WebSocketHandler,
374 | ctx: WebSocketContext,
375 | ) {
376 | let span = info_span!("message_forwarder", context = ?ctx);
377 | let fut = handler.handle_websocket(ctx, stream, sink);
378 | spawn_with_trace(fut, span);
379 | }
380 |
381 | #[instrument(skip_all)]
382 | fn normalize_request<T>(mut req: Request<T>) -> Request<T> {
383 | // Hyper will automatically add a Host header if needed.
384 | req.headers_mut().remove(hyper::header::HOST);
385 |
386 | // HTTP/2 supports multiple cookie headers, but HTTP/1.x only supports one.
387 | if let Entry::Occupied(mut cookies) = req.headers_mut().entry(hyper::header::COOKIE) {
388 | let joined_cookies = bstr::join(b"; ", cookies.iter());
389 | cookies.insert(joined_cookies.try_into().expect("Failed to join cookies"));
390 | }
391 |
392 | *req.version_mut() = hyper::Version::HTTP_11;
393 | req
394 | }
395 |
396 | #[cfg(test)]
397 | mod tests {
398 | use super::*;
399 | use hyper_util::client::legacy::connect::HttpConnector;
400 | use tokio_rustls::rustls::ServerConfig;
401 |
402 | struct CA;
403 |
404 | impl CertificateAuthority for CA {
405 |         async fn gen_server_config(&self, _authority: &Authority) -> Arc<ServerConfig> {
406 | unimplemented!();
407 | }
408 | }
409 |
410 |     fn build_proxy() -> InternalProxy<HttpConnector, CA, crate::NoopHandler, crate::NoopHandler> {
411 | InternalProxy {
412 | ca: Arc::new(CA),
413 | client: Client::builder(TokioExecutor::new()).build(HttpConnector::new()),
414 | server: server::conn::auto::Builder::new(TokioExecutor::new()),
415 | http_handler: crate::NoopHandler::new(),
416 | websocket_handler: crate::NoopHandler::new(),
417 | websocket_connector: None,
418 | client_addr: "127.0.0.1:8080".parse().unwrap(),
419 | }
420 | }
421 |
422 | mod bad_request {
423 | use super::*;
424 |
425 | #[test]
426 | fn correct_status() {
427 | let res = bad_request();
428 | assert_eq!(res.status(), StatusCode::BAD_REQUEST);
429 | }
430 | }
431 |
432 | mod normalize_request {
433 | use super::*;
434 |
435 | #[test]
436 | fn removes_host_header() {
437 | let req = Request::builder()
438 | .uri("http://example.com/")
439 | .header(hyper::header::HOST, "example.com")
440 | .body(())
441 | .unwrap();
442 |
443 | let req = normalize_request(req);
444 |
445 | assert_eq!(req.headers().get(hyper::header::HOST), None);
446 | }
447 |
448 | #[test]
449 | fn joins_cookies() {
450 | let req = Request::builder()
451 | .uri("http://example.com/")
452 | .header(hyper::header::COOKIE, "foo=bar")
453 | .header(hyper::header::COOKIE, "baz=qux")
454 | .body(())
455 | .unwrap();
456 |
457 | let req = normalize_request(req);
458 |
459 | assert_eq!(
460 | req.headers().get_all(hyper::header::COOKIE).iter().count(),
461 | 1
462 | );
463 |
464 | assert_eq!(
465 | req.headers().get(hyper::header::COOKIE),
466 | Some(&"foo=bar; baz=qux".parse().unwrap())
467 | );
468 | }
469 | }
470 |
471 | mod process_connect {
472 | use super::*;
473 |
474 | #[test]
475 | fn returns_bad_request_if_missing_authority() {
476 | let proxy = build_proxy();
477 |
478 | let req = Request::builder()
479 | .uri("/foo/bar?baz")
480 | .body(Body::empty())
481 | .unwrap();
482 |
483 | let res = proxy.process_connect(req);
484 |
485 | assert_eq!(res.status(), StatusCode::BAD_REQUEST)
486 | }
487 | }
488 |
489 | mod upgrade_websocket {
490 | use super::*;
491 |
492 | #[test]
493 | fn returns_bad_request_if_missing_authority() {
494 | let proxy = build_proxy();
495 |
496 | let req = Request::builder()
497 | .uri("/foo/bar?baz")
498 | .body(Body::empty())
499 | .unwrap();
500 |
501 | let res = proxy.upgrade_websocket(req);
502 |
503 | assert_eq!(res.status(), StatusCode::BAD_REQUEST)
504 | }
505 |
506 | #[test]
507 | fn returns_bad_request_if_missing_headers() {
508 | let proxy = build_proxy();
509 |
510 | let req = Request::builder()
511 | .uri("http://example.com/foo/bar?baz")
512 | .body(Body::empty())
513 | .unwrap();
514 |
515 | let res = proxy.upgrade_websocket(req);
516 |
517 | assert_eq!(res.status(), StatusCode::BAD_REQUEST)
518 | }
519 | }
520 | }
521 |
--------------------------------------------------------------------------------
/src/proxy/mod.rs:
--------------------------------------------------------------------------------
1 | mod internal;
2 |
3 | pub mod builder;
4 |
5 | use crate::{
6 | Body, Error, HttpHandler, WebSocketHandler, builder::ProxyBuilder,
7 | certificate_authority::CertificateAuthority,
8 | };
9 | use builder::{AddrOrListener, WantsAddr};
10 | use hyper::service::service_fn;
11 | use hyper_util::{
12 | client::legacy::{Client, connect::Connect},
13 | rt::{TokioExecutor, TokioIo},
14 | server::conn::auto::{self, Builder},
15 | };
16 | use internal::InternalProxy;
17 | use std::sync::Arc;
18 | use tokio::net::TcpListener;
19 | use tokio_graceful::Shutdown;
20 | use tokio_tungstenite::Connector;
21 | use tracing::error;
22 |
23 | /// A proxy server. This must be constructed with a [`ProxyBuilder`].
24 | ///
25 | /// # Examples
26 | ///
27 | /// ```rust
28 | /// use hudsucker::Proxy;
29 | /// # use hudsucker::{
30 | /// # certificate_authority::RcgenAuthority,
31 | /// # rcgen::{CertificateParams, KeyPair},
32 | /// # rustls::crypto::aws_lc_rs,
33 | /// # };
34 | /// #
35 | /// # #[cfg(all(feature = "rcgen-ca", feature = "rustls-client"))]
36 | /// # #[tokio::main]
37 | /// # async fn main() {
38 | /// # let key_pair = include_str!("../../examples/ca/hudsucker.key");
39 | /// # let ca_cert = include_str!("../../examples/ca/hudsucker.cer");
40 | /// # let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
41 | /// # let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
42 | /// # .expect("Failed to parse CA certificate")
43 | /// # .self_signed(&key_pair)
44 | /// # .expect("Failed to sign CA certificate");
45 | /// #
46 | /// # let ca = RcgenAuthority::new(key_pair, ca_cert, 1_000, aws_lc_rs::default_provider());
47 | ///
48 | /// // let ca = ...;
49 | ///
50 | /// let (stop, done) = tokio::sync::oneshot::channel();
51 | ///
52 | /// let proxy = Proxy::builder()
53 | /// .with_addr(std::net::SocketAddr::from(([127, 0, 0, 1], 0)))
54 | /// .with_ca(ca)
55 | /// .with_rustls_client(aws_lc_rs::default_provider())
56 | /// .with_graceful_shutdown(async {
57 | /// done.await.unwrap_or_default();
58 | /// })
59 | /// .build()
60 | /// .expect("Failed to create proxy");
61 | ///
62 | /// tokio::spawn(proxy.start());
63 | ///
64 | /// // Do something else...
65 | ///
66 | /// stop.send(()).unwrap();
67 | /// # }
68 | /// #
69 | /// # #[cfg(not(all(feature = "rcgen-ca", feature = "rustls-client")))]
70 | /// # fn main() {}
71 | /// ```
72 | pub struct Proxy<C, CA, H, W, F> {
73 | al: AddrOrListener,
74 | ca: Arc<CA>,
75 | client: Client<C, Body>,
76 | http_handler: H,
77 | websocket_handler: W,
78 | websocket_connector: Option<Connector>,
79 | server: Option<Builder<TokioExecutor>>,
80 | graceful_shutdown: F,
81 | }
82 |
83 | impl Proxy<(), (), (), (), ()> {
84 | /// Create a new [`ProxyBuilder`].
85 | pub fn builder() -> ProxyBuilder<WantsAddr> {
86 | ProxyBuilder::new()
87 | }
88 | }
89 |
90 | impl<C, CA, H, W, F> Proxy<C, CA, H, W, F>
91 | where
92 | C: Connect + Clone + Send + Sync + 'static,
93 | CA: CertificateAuthority,
94 | H: HttpHandler,
95 | W: WebSocketHandler,
96 | F: Future<Output = ()> + Send + 'static,
97 | {
98 | /// Attempts to start the proxy server.
99 | ///
100 | /// # Errors
101 | ///
102 | /// This will return an error if the proxy server cannot be started.
103 | pub async fn start(self) -> Result<(), Error> {
104 | let server = self.server.unwrap_or_else(|| {
105 | let mut builder = auto::Builder::new(TokioExecutor::new());
106 | builder
107 | .http1()
108 | .title_case_headers(true)
109 | .preserve_header_case(true);
110 | builder
111 | });
112 |
113 | let listener = match self.al {
114 | AddrOrListener::Addr(addr) => TcpListener::bind(addr).await?,
115 | AddrOrListener::Listener(listener) => listener,
116 | };
117 |
118 | let shutdown = Shutdown::new(self.graceful_shutdown);
119 | let guard = shutdown.guard_weak();
120 |
121 | loop {
122 | tokio::select! {
123 | res = listener.accept() => {
124 | let (tcp, client_addr) = match res {
125 | Ok((tcp, client_addr)) => (tcp, client_addr),
126 | Err(e) => {
127 | error!("Failed to accept incoming connection: {}", e);
128 | continue;
129 | }
130 | };
131 |
132 | let server = server.clone();
133 | let client = self.client.clone();
134 | let ca = Arc::clone(&self.ca);
135 | let http_handler = self.http_handler.clone();
136 | let websocket_handler = self.websocket_handler.clone();
137 | let websocket_connector = self.websocket_connector.clone();
138 |
139 | shutdown.spawn_task_fn(move |guard| async move {
140 | let conn = server.serve_connection_with_upgrades(
141 | TokioIo::new(tcp),
142 | service_fn(|req| {
143 | InternalProxy {
144 | ca: Arc::clone(&ca),
145 | client: client.clone(),
146 | server: server.clone(),
147 | http_handler: http_handler.clone(),
148 | websocket_handler: websocket_handler.clone(),
149 | websocket_connector: websocket_connector.clone(),
150 | client_addr,
151 | }
152 | .proxy(req)
153 | }),
154 | );
155 |
156 | let mut conn = std::pin::pin!(conn);
157 |
158 | if let Err(err) = tokio::select! {
159 | conn = conn.as_mut() => conn,
160 | _ = guard.cancelled() => {
161 | conn.as_mut().graceful_shutdown();
162 | conn.await
163 | }
164 | } {
165 | error!("Error serving connection: {}", err);
166 | }
167 | });
168 | }
169 | _ = guard.cancelled() => {
170 | break;
171 | }
172 | }
173 | }
174 |
175 | shutdown.shutdown().await;
176 |
177 | Ok(())
178 | }
179 | }
180 |
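`start` leans on `tokio_graceful` for coordinated shutdown: the `graceful_shutdown` future acts as the trigger, every accepted connection runs as a task spawned through the `Shutdown` handle, the accept loop exits when the weak guard reports cancellation, and `shutdown()` then waits for in-flight connections to drain. A stripped-down sketch of that same pattern in isolation (a hypothetical standalone example, using only the `tokio_graceful` calls that appear above):

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    let (stop, done) = tokio::sync::oneshot::channel::<()>();

    // Plays the role of the proxy's `graceful_shutdown` future.
    let shutdown = tokio_graceful::Shutdown::new(async {
        done.await.unwrap_or_default();
    });
    let guard = shutdown.guard_weak();

    // Stands in for a spawned per-connection task.
    shutdown.spawn_task(async {
        tokio::time::sleep(Duration::from_millis(10)).await;
    });

    // Trigger shutdown (in the proxy, the caller completes the future).
    stop.send(()).unwrap();

    // The accept loop breaks once the weak guard reports cancellation...
    guard.cancelled().await;

    // ...and `shutdown()` waits for in-flight tasks to finish.
    shutdown.shutdown().await;
}
```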
--------------------------------------------------------------------------------
/src/rewind.rs:
--------------------------------------------------------------------------------
1 | // adapted from https://github.com/hyperium/hyper/blob/master/src/common/io/rewind.rs
2 |
3 | use hyper::body::Bytes;
4 | use std::{
5 | cmp,
6 | io::{self, IoSlice},
7 | pin::Pin,
8 | task::{Context, Poll},
9 | };
10 | use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
11 |
12 | /// Combine a buffer with an IO, rewinding reads to use the buffer.
13 | #[derive(Debug)]
14 | pub(crate) struct Rewind<T> {
15 | pre: Option<Bytes>,
16 | inner: T,
17 | }
18 |
19 | impl<T> Rewind<T> {
20 | pub(crate) fn new(io: T, buf: Bytes) -> Self {
21 | Rewind {
22 | pre: Some(buf),
23 | inner: io,
24 | }
25 | }
26 | }
27 |
28 | impl<T> AsyncRead for Rewind<T>
29 | where
30 | T: AsyncRead + Unpin,
31 | {
32 | fn poll_read(
33 | mut self: Pin<&mut Self>,
34 | cx: &mut Context<'_>,
35 | buf: &mut ReadBuf<'_>,
36 | ) -> Poll<io::Result<()>> {
37 | if let Some(mut prefix) = self.pre.take() {
38 | // If there are no remaining bytes, let the bytes get dropped.
39 | if !prefix.is_empty() {
40 | let copy_len = cmp::min(prefix.len(), buf.remaining());
41 | buf.put_slice(&prefix.split_to(copy_len));
42 | // Put back what's left
43 | if !prefix.is_empty() {
44 | self.pre = Some(prefix);
45 | }
46 |
47 | return Poll::Ready(Ok(()));
48 | }
49 | }
50 |
51 | Pin::new(&mut self.inner).poll_read(cx, buf)
52 | }
53 | }
54 |
55 | impl<T> AsyncWrite for Rewind<T>
56 | where
57 | T: AsyncWrite + Unpin,
58 | {
59 | fn poll_write(
60 | mut self: Pin<&mut Self>,
61 | cx: &mut Context<'_>,
62 | buf: &[u8],
63 | ) -> Poll<io::Result<usize>> {
64 | Pin::new(&mut self.inner).poll_write(cx, buf)
65 | }
66 |
67 | fn poll_write_vectored(
68 | mut self: Pin<&mut Self>,
69 | cx: &mut Context<'_>,
70 | bufs: &[IoSlice<'_>],
71 | ) -> Poll<io::Result<usize>> {
72 | Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)
73 | }
74 |
75 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
76 | Pin::new(&mut self.inner).poll_flush(cx)
77 | }
78 |
79 | fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
80 | Pin::new(&mut self.inner).poll_shutdown(cx)
81 | }
82 |
83 | fn is_write_vectored(&self) -> bool {
84 | self.inner.is_write_vectored()
85 | }
86 | }
87 |
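`Rewind` exists so the proxy can hand bytes it has already read back to whatever consumes the connection next (for example, after peeking at an upgraded CONNECT stream to decide whether it starts a TLS handshake). A small illustrative test of the replay behaviour, written against the API above (a sketch one could drop into this module, not existing crate code):

```rust
#[cfg(test)]
mod rewind_replay_sketch {
    use super::Rewind;
    use hyper::body::Bytes;
    use tokio::io::AsyncReadExt;

    #[tokio::test]
    async fn replays_prefix_before_inner_io() {
        // The inner IO yields "world"; the prefix buffer holds "hello ".
        let inner: &[u8] = b"world";
        let mut io = Rewind::new(inner, Bytes::from_static(b"hello "));

        let mut out = Vec::new();
        io.read_to_end(&mut out).await.unwrap();

        // The buffered prefix is read first, then the inner IO.
        assert_eq!(out, b"hello world");
    }
}
```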
--------------------------------------------------------------------------------
/tests/common/mod.rs:
--------------------------------------------------------------------------------
1 | use async_compression::tokio::bufread::GzipEncoder;
2 | use futures::{SinkExt, StreamExt};
3 | use hudsucker::{
4 | Body, HttpContext, HttpHandler, Proxy, RequestOrResponse, WebSocketContext, WebSocketHandler,
5 | certificate_authority::CertificateAuthority,
6 | decode_request, decode_response,
7 | hyper::{
8 | Method, Request, Response, StatusCode, body::Incoming, header::CONTENT_ENCODING,
9 | service::service_fn,
10 | },
11 | hyper_util::{
12 | client::legacy::{
13 | Client,
14 | connect::{Connect, HttpConnector},
15 | },
16 | rt::{TokioExecutor, TokioIo},
17 | server::conn::auto,
18 | },
19 | rustls,
20 | tokio_tungstenite::tungstenite::{Message, Utf8Bytes},
21 | };
22 | use reqwest::tls::Certificate;
23 | use rustls_pemfile as pemfile;
24 | use std::{
25 | convert::Infallible,
26 | net::SocketAddr,
27 | sync::{
28 | Arc,
29 | atomic::{AtomicUsize, Ordering},
30 | },
31 | };
32 | use tokio::{net::TcpListener, sync::oneshot::Sender};
33 | use tokio_graceful::Shutdown;
34 | use tokio_native_tls::native_tls;
35 | use tokio_util::io::ReaderStream;
36 |
37 | pub const HELLO_WORLD: &str = "Hello, World";
38 | pub const WORLD: Utf8Bytes = Utf8Bytes::from_static("world");
39 |
40 | async fn test_server(req: Request<Incoming>) -> Result<Response<Body>, Infallible> {
41 | if hyper_tungstenite::is_upgrade_request(&req) {
42 | let (res, ws) = hyper_tungstenite::upgrade(req, None).unwrap();
43 |
44 | tokio::spawn(async move {
45 | let mut ws = ws.await.unwrap();
46 |
47 | while let Some(msg) = ws.next().await {
48 | let msg = msg.unwrap();
49 | if msg.is_close() {
50 | break;
51 | }
52 | ws.send(Message::Text(WORLD)).await.unwrap();
53 | }
54 | });
55 |
56 | return Ok(res.map(Body::from));
57 | }
58 |
59 | match (req.method(), req.uri().path()) {
60 | (&Method::GET, "/hello") => Ok(Response::new(Body::from(HELLO_WORLD))),
61 | (&Method::GET, "/hello/gzip") => Ok(Response::builder()
62 | .header(CONTENT_ENCODING, "gzip")
63 | .status(StatusCode::OK)
64 | .body(Body::from_stream(ReaderStream::new(GzipEncoder::new(
65 | HELLO_WORLD.as_bytes(),
66 | ))))
67 | .unwrap()),
68 | (&Method::POST, "/echo") => Ok(Response::new(req.into())),
69 | _ => Ok(Response::new(Body::empty())),
70 | }
71 | }
72 |
73 | pub async fn start_http_server() -> Result<(SocketAddr, Sender<()>), Box<dyn std::error::Error>> {
74 | let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await?;
75 | let addr = listener.local_addr()?;
76 | let (tx, rx) = tokio::sync::oneshot::channel();
77 |
78 | tokio::spawn(async move {
79 | let server = auto::Builder::new(TokioExecutor::new());
80 | let shutdown = Shutdown::new(async { rx.await.unwrap_or_default() });
81 | let guard = shutdown.guard_weak();
82 |
83 | loop {
84 | tokio::select! {
85 | res = listener.accept() => {
86 | let (tcp, _) = res.unwrap();
87 | let server = server.clone();
88 |
89 | shutdown.spawn_task(async move {
90 | server
91 | .serve_connection_with_upgrades(TokioIo::new(tcp), service_fn(test_server))
92 | .await
93 | .unwrap();
94 | });
95 | }
96 | _ = guard.cancelled() => {
97 | break;
98 | }
99 | }
100 | }
101 |
102 | shutdown.shutdown().await;
103 | });
104 |
105 | Ok((addr, tx))
106 | }
107 |
108 | pub async fn start_https_server(
109 | ca: impl CertificateAuthority,
110 | ) -> Result<(SocketAddr, Sender<()>), Box<dyn std::error::Error>> {
111 | let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await?;
112 | let addr = listener.local_addr()?;
113 | let acceptor: tokio_rustls::TlsAcceptor = ca
114 | .gen_server_config(&"localhost".parse().unwrap())
115 | .await
116 | .into();
117 | let (tx, rx) = tokio::sync::oneshot::channel();
118 |
119 | tokio::spawn(async move {
120 | let server = auto::Builder::new(TokioExecutor::new());
121 | let shutdown = Shutdown::new(async { rx.await.unwrap_or_default() });
122 | let guard = shutdown.guard_weak();
123 |
124 | loop {
125 | tokio::select! {
126 | res = listener.accept() => {
127 | let (tcp, _) = res.unwrap();
128 | let tcp = acceptor.accept(tcp).await.unwrap();
129 | let server = server.clone();
130 |
131 | shutdown.spawn_task(async move {
132 | server
133 | .serve_connection_with_upgrades(TokioIo::new(tcp), service_fn(test_server))
134 | .await
135 | .unwrap();
136 | });
137 | }
138 | _ = guard.cancelled() => {
139 | break;
140 | }
141 | }
142 | }
143 |
144 | shutdown.shutdown().await;
145 | });
146 |
147 | Ok((addr, tx))
148 | }
149 |
150 | pub fn http_client() -> Client<HttpConnector, Body> {
151 | Client::builder(TokioExecutor::new()).build_http()
152 | }
153 |
154 | pub fn plain_websocket_connector() -> tokio_tungstenite::Connector {
155 | tokio_tungstenite::Connector::Plain
156 | }
157 |
158 | fn rustls_client_config() -> rustls::ClientConfig {
159 | let mut roots = rustls::RootCertStore::empty();
160 |
161 | for cert in rustls_native_certs::load_native_certs().unwrap() {
162 | roots.add(cert.clone()).unwrap();
163 | }
164 |
165 | let mut ca_cert_bytes: &[u8] = include_bytes!("../../examples/ca/hudsucker.cer");
166 | let ca_cert = pemfile::certs(&mut ca_cert_bytes)
167 | .next()
168 | .unwrap()
169 | .expect("Failed to parse CA certificate");
170 |
171 | roots.add(ca_cert).unwrap();
172 |
173 | rustls::ClientConfig::builder()
174 | .with_root_certificates(roots)
175 | .with_no_client_auth()
176 | }
177 |
178 | pub fn rustls_websocket_connector() -> tokio_tungstenite::Connector {
179 | tokio_tungstenite::Connector::Rustls(Arc::new(rustls_client_config()))
180 | }
181 |
182 | pub fn rustls_client() -> Client<hyper_rustls::HttpsConnector<HttpConnector>, Body> {
183 | let https = hyper_rustls::HttpsConnectorBuilder::new()
184 | .with_tls_config(rustls_client_config())
185 | .https_or_http()
186 | .enable_http1()
187 | .build();
188 |
189 | Client::builder(TokioExecutor::new())
190 | .http1_title_case_headers(true)
191 | .http1_preserve_header_case(true)
192 | .build(https)
193 | }
194 |
195 | fn native_tls_connector() -> native_tls::TlsConnector {
196 | let ca_cert =
197 | native_tls::Certificate::from_pem(include_bytes!("../../examples/ca/hudsucker.cer"))
198 | .unwrap();
199 |
200 | native_tls::TlsConnector::builder()
201 | .add_root_certificate(ca_cert)
202 | .build()
203 | .unwrap()
204 | }
205 |
206 | pub fn native_tls_websocket_connector() -> tokio_tungstenite::Connector {
207 | tokio_tungstenite::Connector::NativeTls(native_tls_connector())
208 | }
209 |
210 | pub fn native_tls_client() -> Client<hyper_tls::HttpsConnector<HttpConnector>, Body> {
211 | let mut http = HttpConnector::new();
212 | http.enforce_http(false);
213 |
214 | let tls = native_tls_connector().into();
215 | let https = (http, tls).into();
216 |
217 | Client::builder(TokioExecutor::new()).build(https)
218 | }
219 |
220 | pub async fn start_proxy<C>(
221 | ca: impl CertificateAuthority,
222 | client: Client<C, Body>,
223 | websocket_connector: tokio_tungstenite::Connector,
224 | ) -> Result<(SocketAddr, TestHandler, Sender<()>), Box<dyn std::error::Error>>
225 | where
226 | C: Connect + Clone + Send + Sync + 'static,
227 | {
228 | _start_proxy(ca, client, websocket_connector, true).await
229 | }
230 |
231 | pub async fn start_proxy_without_intercept<C>(
232 | ca: impl CertificateAuthority,
233 | client: Client<C, Body>,
234 | websocket_connector: tokio_tungstenite::Connector,
235 | ) -> Result<(SocketAddr, TestHandler, Sender<()>), Box<dyn std::error::Error>>
236 | where
237 | C: Connect + Clone + Send + Sync + 'static,
238 | {
239 | _start_proxy(ca, client, websocket_connector, false).await
240 | }
241 |
242 | async fn _start_proxy<C>(
243 | ca: impl CertificateAuthority,
244 | client: Client<C, Body>,
245 | websocket_connector: tokio_tungstenite::Connector,
246 | should_intercept: bool,
247 | ) -> Result<(SocketAddr, TestHandler, Sender<()>), Box<dyn std::error::Error>>
248 | where
249 | C: Connect + Clone + Send + Sync + 'static,
250 | {
251 | let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await?;
252 | let addr = listener.local_addr()?;
253 | let (tx, rx) = tokio::sync::oneshot::channel();
254 |
255 | let handler = TestHandler::new(should_intercept);
256 |
257 | let proxy = Proxy::builder()
258 | .with_listener(listener)
259 | .with_ca(ca)
260 | .with_client(client)
261 | .with_http_handler(handler.clone())
262 | .with_websocket_handler(handler.clone())
263 | .with_websocket_connector(websocket_connector)
264 | .with_graceful_shutdown(async {
265 | rx.await.unwrap_or_default();
266 | })
267 | .build()
268 | .expect("Failed to create proxy");
269 |
270 | tokio::spawn(proxy.start());
271 | Ok((addr, handler, tx))
272 | }
273 |
274 | pub async fn start_noop_proxy(
275 | ca: impl CertificateAuthority,
276 | ) -> Result<(SocketAddr, Sender<()>), Box<dyn std::error::Error>> {
277 | let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await?;
278 | let addr = listener.local_addr()?;
279 | let (tx, rx) = tokio::sync::oneshot::channel();
280 |
281 | let proxy = Proxy::builder()
282 | .with_listener(listener)
283 | .with_ca(ca)
284 | .with_client(native_tls_client())
285 | .with_graceful_shutdown(async {
286 | rx.await.unwrap_or_default();
287 | })
288 | .build()
289 | .expect("Failed to create proxy");
290 |
291 | tokio::spawn(proxy.start());
292 | Ok((addr, tx))
293 | }
294 |
295 | pub fn build_client(proxy: &str) -> reqwest::Client {
296 | let proxy = reqwest::Proxy::all(proxy).unwrap();
297 | let ca_cert = Certificate::from_pem(include_bytes!("../../examples/ca/hudsucker.cer")).unwrap();
298 |
299 | reqwest::Client::builder()
300 | .proxy(proxy)
301 | .add_root_certificate(ca_cert)
302 | .no_brotli()
303 | .no_deflate()
304 | .no_gzip()
305 | .build()
306 | .unwrap()
307 | }
308 |
309 | #[derive(Clone)]
310 | pub struct TestHandler {
311 | pub request_counter: Arc<AtomicUsize>,
312 | pub response_counter: Arc<AtomicUsize>,
313 | pub message_counter: Arc<AtomicUsize>,
314 | pub should_intercept: bool,
315 | }
316 |
317 | impl TestHandler {
318 | pub fn new(should_intercept: bool) -> Self {
319 | Self {
320 | request_counter: Arc::new(AtomicUsize::new(0)),
321 | response_counter: Arc::new(AtomicUsize::new(0)),
322 | message_counter: Arc::new(AtomicUsize::new(0)),
323 | should_intercept,
324 | }
325 | }
326 | }
327 |
328 | impl HttpHandler for TestHandler {
329 | async fn handle_request(
330 | &mut self,
331 | _ctx: &HttpContext,
332 | req: Request<Body>,
333 | ) -> RequestOrResponse {
334 | self.request_counter.fetch_add(1, Ordering::Relaxed);
335 | let req = decode_request(req).unwrap();
336 | RequestOrResponse::Request(req)
337 | }
338 |
339 | async fn handle_response(&mut self, _ctx: &HttpContext, res: Response<Body>) -> Response<Body> {
340 | self.response_counter.fetch_add(1, Ordering::Relaxed);
341 | decode_response(res).unwrap()
342 | }
343 |
344 | async fn should_intercept(&mut self, _ctx: &HttpContext, _req: &Request<Body>) -> bool {
345 | self.should_intercept
346 | }
347 | }
348 |
349 | impl WebSocketHandler for TestHandler {
350 | async fn handle_message(&mut self, _ctx: &WebSocketContext, msg: Message) -> Option<Message> {
351 | self.message_counter.fetch_add(1, Ordering::Relaxed);
352 | Some(msg)
353 | }
354 | }
355 |
--------------------------------------------------------------------------------
/tests/openssl_ca.rs:
--------------------------------------------------------------------------------
1 | use hudsucker::{
2 | certificate_authority::OpensslAuthority,
3 | openssl::{hash::MessageDigest, pkey::PKey, x509::X509},
4 | rustls::crypto::aws_lc_rs,
5 | };
6 | use std::sync::atomic::Ordering;
7 |
8 | mod common;
9 |
10 | fn build_ca() -> OpensslAuthority {
11 | let private_key_bytes: &[u8] = include_bytes!("../examples/ca/hudsucker.key");
12 | let ca_cert_bytes: &[u8] = include_bytes!("../examples/ca/hudsucker.cer");
13 | let private_key =
14 | PKey::private_key_from_pem(private_key_bytes).expect("Failed to parse private key");
15 | let ca_cert = X509::from_pem(ca_cert_bytes).expect("Failed to parse CA certificate");
16 |
17 | OpensslAuthority::new(
18 | private_key,
19 | ca_cert,
20 | MessageDigest::sha256(),
21 | 1_000,
22 | aws_lc_rs::default_provider(),
23 | )
24 | }
25 |
26 | #[tokio::test]
27 | async fn https_rustls() {
28 | let (proxy_addr, handler, stop_proxy) = common::start_proxy(
29 | build_ca(),
30 | common::rustls_client(),
31 | common::rustls_websocket_connector(),
32 | )
33 | .await
34 | .unwrap();
35 |
36 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
37 | let client = common::build_client(&proxy_addr.to_string());
38 |
39 | let res = client
40 | .get(format!("https://localhost:{}/hello", server_addr.port()))
41 | .send()
42 | .await
43 | .unwrap();
44 |
45 | assert_eq!(res.status(), 200);
46 | assert_eq!(handler.request_counter.load(Ordering::Relaxed), 2);
47 | assert_eq!(handler.response_counter.load(Ordering::Relaxed), 1);
48 |
49 | stop_server.send(()).unwrap();
50 | stop_proxy.send(()).unwrap();
51 | }
52 |
53 | #[tokio::test]
54 | async fn https_native_tls() {
55 | let (proxy_addr, handler, stop_proxy) = common::start_proxy(
56 | build_ca(),
57 | common::native_tls_client(),
58 | common::native_tls_websocket_connector(),
59 | )
60 | .await
61 | .unwrap();
62 |
63 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
64 | let client = common::build_client(&proxy_addr.to_string());
65 |
66 | let res = client
67 | .get(format!("https://localhost:{}/hello", server_addr.port()))
68 | .send()
69 | .await
70 | .unwrap();
71 |
72 | assert_eq!(res.status(), 200);
73 | assert_eq!(handler.request_counter.load(Ordering::Relaxed), 2);
74 | assert_eq!(handler.response_counter.load(Ordering::Relaxed), 1);
75 |
76 | stop_server.send(()).unwrap();
77 | stop_proxy.send(()).unwrap();
78 | }
79 |
80 | #[tokio::test]
81 | async fn without_intercept() {
82 | let (proxy_addr, handler, stop_proxy) = common::start_proxy_without_intercept(
83 | build_ca(),
84 | common::http_client(),
85 | common::plain_websocket_connector(),
86 | )
87 | .await
88 | .unwrap();
89 |
90 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
91 | let client = common::build_client(&proxy_addr.to_string());
92 |
93 | let res = client
94 | .get(format!("https://localhost:{}/hello", server_addr.port()))
95 | .send()
96 | .await
97 | .unwrap();
98 |
99 | assert_eq!(res.status(), 200);
100 | assert_eq!(handler.request_counter.load(Ordering::Relaxed), 1);
101 | assert_eq!(handler.response_counter.load(Ordering::Relaxed), 0);
102 |
103 | stop_server.send(()).unwrap();
104 | stop_proxy.send(()).unwrap();
105 | }
106 |
107 | #[tokio::test]
108 | async fn decodes_response() {
109 | let (proxy_addr, _, stop_proxy) = common::start_proxy(
110 | build_ca(),
111 | common::native_tls_client(),
112 | common::native_tls_websocket_connector(),
113 | )
114 | .await
115 | .unwrap();
116 |
117 | let (server_addr, stop_server) = common::start_http_server().await.unwrap();
118 | let client = common::build_client(&proxy_addr.to_string());
119 |
120 | let res = client
121 | .get(format!("http://{}/hello/gzip", server_addr))
122 | .send()
123 | .await
124 | .unwrap();
125 |
126 | assert_eq!(res.status(), 200);
127 | assert_eq!(res.bytes().await.unwrap(), common::HELLO_WORLD);
128 |
129 | stop_server.send(()).unwrap();
130 | stop_proxy.send(()).unwrap();
131 | }
132 |
133 | #[tokio::test]
134 | async fn noop() {
135 | let (proxy_addr, stop_proxy) = common::start_noop_proxy(build_ca()).await.unwrap();
136 | let (server_addr, stop_server) = common::start_http_server().await.unwrap();
137 | let client = common::build_client(&proxy_addr.to_string());
138 |
139 | let res = client
140 | .get(format!("http://{}/hello", server_addr))
141 | .send()
142 | .await
143 | .unwrap();
144 |
145 | assert_eq!(res.status(), 200);
146 | assert_eq!(res.bytes().await.unwrap(), common::HELLO_WORLD);
147 |
148 | stop_server.send(()).unwrap();
149 | stop_proxy.send(()).unwrap();
150 | }
151 |
--------------------------------------------------------------------------------
/tests/rcgen_ca.rs:
--------------------------------------------------------------------------------
1 | use hudsucker::{
2 | certificate_authority::RcgenAuthority,
3 | rcgen::{CertificateParams, KeyPair},
4 | rustls::crypto::aws_lc_rs,
5 | };
6 | use std::sync::atomic::Ordering;
7 |
8 | mod common;
9 |
10 | fn build_ca() -> RcgenAuthority {
11 | let key_pair = include_str!("../examples/ca/hudsucker.key");
12 | let ca_cert = include_str!("../examples/ca/hudsucker.cer");
13 | let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
14 | let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
15 | .expect("Failed to parse CA certificate")
16 | .self_signed(&key_pair)
17 | .expect("Failed to sign CA certificate");
18 |
19 | RcgenAuthority::new(key_pair, ca_cert, 1000, aws_lc_rs::default_provider())
20 | }
21 |
22 | #[tokio::test]
23 | async fn https_rustls() {
24 | let (proxy_addr, handler, stop_proxy) = common::start_proxy(
25 | build_ca(),
26 | common::rustls_client(),
27 | common::rustls_websocket_connector(),
28 | )
29 | .await
30 | .unwrap();
31 |
32 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
33 | let client = common::build_client(&proxy_addr.to_string());
34 |
35 | let res = client
36 | .get(format!("https://localhost:{}/hello", server_addr.port()))
37 | .send()
38 | .await
39 | .unwrap();
40 |
41 | assert_eq!(res.status(), 200);
42 | assert_eq!(handler.request_counter.load(Ordering::Relaxed), 2);
43 | assert_eq!(handler.response_counter.load(Ordering::Relaxed), 1);
44 |
45 | stop_server.send(()).unwrap();
46 | stop_proxy.send(()).unwrap();
47 | }
48 |
49 | #[tokio::test]
50 | async fn https_native_tls() {
51 | let (proxy_addr, handler, stop_proxy) = common::start_proxy(
52 | build_ca(),
53 | common::native_tls_client(),
54 | common::native_tls_websocket_connector(),
55 | )
56 | .await
57 | .unwrap();
58 |
59 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
60 | let client = common::build_client(&proxy_addr.to_string());
61 |
62 | let res = client
63 | .get(format!("https://localhost:{}/hello", server_addr.port()))
64 | .send()
65 | .await
66 | .unwrap();
67 |
68 | assert_eq!(res.status(), 200);
69 | assert_eq!(handler.request_counter.load(Ordering::Relaxed), 2);
70 | assert_eq!(handler.response_counter.load(Ordering::Relaxed), 1);
71 |
72 | stop_server.send(()).unwrap();
73 | stop_proxy.send(()).unwrap();
74 | }
75 |
76 | #[tokio::test]
77 | async fn without_intercept() {
78 | let (proxy_addr, handler, stop_proxy) = common::start_proxy_without_intercept(
79 | build_ca(),
80 | common::http_client(),
81 | common::plain_websocket_connector(),
82 | )
83 | .await
84 | .unwrap();
85 |
86 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
87 | let client = common::build_client(&proxy_addr.to_string());
88 |
89 | let res = client
90 | .get(format!("https://localhost:{}/hello", server_addr.port()))
91 | .send()
92 | .await
93 | .unwrap();
94 |
95 | assert_eq!(res.status(), 200);
96 | assert_eq!(handler.request_counter.load(Ordering::Relaxed), 1);
97 | assert_eq!(handler.response_counter.load(Ordering::Relaxed), 0);
98 |
99 | stop_server.send(()).unwrap();
100 | stop_proxy.send(()).unwrap();
101 | }
102 |
103 | #[tokio::test]
104 | async fn decodes_response() {
105 | let (proxy_addr, _, stop_proxy) = common::start_proxy(
106 | build_ca(),
107 | common::native_tls_client(),
108 | common::native_tls_websocket_connector(),
109 | )
110 | .await
111 | .unwrap();
112 |
113 | let (server_addr, stop_server) = common::start_http_server().await.unwrap();
114 | let client = common::build_client(&proxy_addr.to_string());
115 |
116 | let res = client
117 | .get(format!("http://{}/hello/gzip", server_addr))
118 | .send()
119 | .await
120 | .unwrap();
121 |
122 | assert_eq!(res.status(), 200);
123 | assert_eq!(res.bytes().await.unwrap(), common::HELLO_WORLD);
124 |
125 | stop_server.send(()).unwrap();
126 | stop_proxy.send(()).unwrap();
127 | }
128 |
129 | #[tokio::test]
130 | async fn noop() {
131 | let (proxy_addr, stop_proxy) = common::start_noop_proxy(build_ca()).await.unwrap();
132 | let (server_addr, stop_server) = common::start_http_server().await.unwrap();
133 | let client = common::build_client(&proxy_addr.to_string());
134 |
135 | let res = client
136 | .get(format!("http://{}/hello", server_addr))
137 | .send()
138 | .await
139 | .unwrap();
140 |
141 | assert_eq!(res.status(), 200);
142 | assert_eq!(res.bytes().await.unwrap(), common::HELLO_WORLD);
143 |
144 | stop_server.send(()).unwrap();
145 | stop_proxy.send(()).unwrap();
146 | }
147 |
--------------------------------------------------------------------------------
/tests/websocket.rs:
--------------------------------------------------------------------------------
1 | use async_http_proxy::http_connect_tokio;
2 | use futures::{SinkExt, StreamExt};
3 | use hudsucker::{
4 | certificate_authority::RcgenAuthority,
5 | rcgen::{CertificateParams, KeyPair},
6 | rustls::crypto::aws_lc_rs,
7 | tokio_tungstenite::tungstenite::{Message, Utf8Bytes},
8 | };
9 | use std::sync::atomic::Ordering;
10 | use tokio::net::TcpStream;
11 |
12 | #[allow(unused)]
13 | mod common;
14 |
15 | const HELLO: Utf8Bytes = Utf8Bytes::from_static("hello");
16 |
17 | fn build_ca() -> RcgenAuthority {
18 | let key_pair = include_str!("../examples/ca/hudsucker.key");
19 | let ca_cert = include_str!("../examples/ca/hudsucker.cer");
20 | let key_pair = KeyPair::from_pem(key_pair).expect("Failed to parse private key");
21 | let ca_cert = CertificateParams::from_ca_cert_pem(ca_cert)
22 | .expect("Failed to parse CA certificate")
23 | .self_signed(&key_pair)
24 | .expect("Failed to sign CA certificate");
25 |
26 | RcgenAuthority::new(key_pair, ca_cert, 1000, aws_lc_rs::default_provider())
27 | }
28 |
29 | #[tokio::test]
30 | async fn http() {
31 | let (proxy_addr, handler, stop_proxy) = common::start_proxy(
32 | build_ca(),
33 | common::native_tls_client(),
34 | common::native_tls_websocket_connector(),
35 | )
36 | .await
37 | .unwrap();
38 |
39 | let (server_addr, stop_server) = common::start_http_server().await.unwrap();
40 |
41 | let mut stream = TcpStream::connect(proxy_addr).await.unwrap();
42 | http_connect_tokio(
43 | &mut stream,
44 | &server_addr.ip().to_string(),
45 | server_addr.port(),
46 | )
47 | .await
48 | .unwrap();
49 |
50 | let (mut ws, _) = tokio_tungstenite::client_async(format!("ws://{}", server_addr), stream)
51 | .await
52 | .unwrap();
53 |
54 | ws.send(Message::Text(HELLO)).await.unwrap();
55 |
56 | let msg = ws.next().await.unwrap().unwrap();
57 |
58 | assert_eq!(msg.into_text().unwrap(), common::WORLD);
59 | assert_eq!(handler.message_counter.load(Ordering::Relaxed), 2);
60 |
61 | stop_server.send(()).unwrap();
62 | stop_proxy.send(()).unwrap();
63 | }
64 |
65 | #[tokio::test]
66 | async fn https_rustls() {
67 | let (proxy_addr, handler, stop_proxy) = common::start_proxy(
68 | build_ca(),
69 | common::rustls_client(),
70 | common::rustls_websocket_connector(),
71 | )
72 | .await
73 | .unwrap();
74 |
75 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
76 |
77 | let mut stream = TcpStream::connect(proxy_addr).await.unwrap();
78 | http_connect_tokio(&mut stream, "localhost", server_addr.port())
79 | .await
80 | .unwrap();
81 |
82 | let (mut ws, _) = tokio_tungstenite::client_async_tls_with_config(
83 | format!("wss://localhost:{}", server_addr.port()),
84 | stream,
85 | None,
86 | Some(common::rustls_websocket_connector()),
87 | )
88 | .await
89 | .unwrap();
90 |
91 | ws.send(Message::Text(HELLO)).await.unwrap();
92 |
93 | let msg = ws.next().await.unwrap().unwrap();
94 |
95 | assert_eq!(msg.into_text().unwrap(), common::WORLD);
96 | assert_eq!(handler.message_counter.load(Ordering::Relaxed), 2);
97 |
98 | stop_server.send(()).unwrap();
99 | stop_proxy.send(()).unwrap();
100 | }
101 |
102 | #[tokio::test]
103 | async fn https_native_tls() {
104 | let (proxy_addr, handler, stop_proxy) = common::start_proxy(
105 | build_ca(),
106 | common::native_tls_client(),
107 | common::native_tls_websocket_connector(),
108 | )
109 | .await
110 | .unwrap();
111 |
112 | let (server_addr, stop_server) = common::start_https_server(build_ca()).await.unwrap();
113 |
114 | let mut stream = TcpStream::connect(proxy_addr).await.unwrap();
115 | http_connect_tokio(&mut stream, "localhost", server_addr.port())
116 | .await
117 | .unwrap();
118 |
119 | let (mut ws, _) = tokio_tungstenite::client_async_tls_with_config(
120 | format!("wss://localhost:{}", server_addr.port()),
121 | stream,
122 | None,
123 | Some(common::native_tls_websocket_connector()),
124 | )
125 | .await
126 | .unwrap();
127 |
128 | ws.send(Message::Text(HELLO)).await.unwrap();
129 |
130 | let msg = ws.next().await.unwrap().unwrap();
131 |
132 | assert_eq!(msg.into_text().unwrap(), common::WORLD);
133 | assert_eq!(handler.message_counter.load(Ordering::Relaxed), 2);
134 |
135 | stop_server.send(()).unwrap();
136 | stop_proxy.send(()).unwrap();
137 | }
138 |
139 | #[tokio::test]
140 | async fn without_intercept() {
141 | let (proxy_addr, handler, stop_proxy) = common::start_proxy_without_intercept(
142 | build_ca(),
143 | common::http_client(),
144 | common::plain_websocket_connector(),
145 | )
146 | .await
147 | .unwrap();
148 |
149 | let (server_addr, stop_server) = common::start_http_server().await.unwrap();
150 |
151 | let mut stream = TcpStream::connect(proxy_addr).await.unwrap();
152 | http_connect_tokio(
153 | &mut stream,
154 | &server_addr.ip().to_string(),
155 | server_addr.port(),
156 | )
157 | .await
158 | .unwrap();
159 |
160 | let (mut ws, _) = tokio_tungstenite::client_async(format!("ws://{}", server_addr), stream)
161 | .await
162 | .unwrap();
163 |
164 | ws.send(Message::Text(HELLO)).await.unwrap();
165 |
166 | let msg = ws.next().await.unwrap().unwrap();
167 |
168 | assert_eq!(msg.into_text().unwrap(), common::WORLD);
169 | assert_eq!(handler.message_counter.load(Ordering::Relaxed), 0);
170 |
171 | stop_server.send(()).unwrap();
172 | stop_proxy.send(()).unwrap();
173 | }
174 |
175 | #[tokio::test]
176 | async fn noop() {
177 | let (proxy_addr, stop_proxy) = common::start_noop_proxy(build_ca()).await.unwrap();
178 | let (server_addr, stop_server) = common::start_http_server().await.unwrap();
179 |
180 | let mut stream = TcpStream::connect(proxy_addr).await.unwrap();
181 | http_connect_tokio(
182 | &mut stream,
183 | &server_addr.ip().to_string(),
184 | server_addr.port(),
185 | )
186 | .await
187 | .unwrap();
188 |
189 | let (mut ws, _) = tokio_tungstenite::client_async(format!("ws://{}", server_addr), stream)
190 | .await
191 | .unwrap();
192 |
193 | ws.send(Message::Text(HELLO)).await.unwrap();
194 | let msg = ws.next().await.unwrap().unwrap();
195 |
196 | assert_eq!(msg.into_text().unwrap(), common::WORLD);
197 |
198 | stop_server.send(()).unwrap();
199 | stop_proxy.send(()).unwrap();
200 | }
201 |
--------------------------------------------------------------------------------