├── .github └── workflows │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── MIGRATE-2-to-3.md ├── README.md ├── README.tpl ├── RELEASE.txt ├── cargo_deny.sh ├── deny.toml ├── examples ├── cureq.rs ├── mpsc-transport.rs └── proxy.rs └── src ├── agent.rs ├── body ├── brotli.rs ├── build.rs ├── charset.rs ├── gzip.rs ├── limit.rs ├── lossy.rs └── mod.rs ├── config.rs ├── cookies.rs ├── error.rs ├── lib.rs ├── middleware.rs ├── pool.rs ├── proxy.rs ├── query.rs ├── request.rs ├── request_ext.rs ├── response.rs ├── run.rs ├── send_body.rs ├── timings.rs ├── tls ├── cert.rs ├── mod.rs ├── native_tls.rs └── rustls.rs ├── unversioned ├── mod.rs ├── resolver.rs └── transport │ ├── buf.rs │ ├── chain.rs │ ├── io.rs │ ├── mod.rs │ ├── socks.rs │ ├── tcp.rs │ ├── test.rs │ └── time.rs └── util.rs /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: CI 4 | 5 | concurrency: 6 | group: ${{ github.workflow }}-${{ github.ref }} 7 | cancel-in-progress: true 8 | 9 | permissions: {} 10 | 11 | jobs: 12 | lint: 13 | name: Lint 14 | runs-on: ubuntu-latest 15 | env: 16 | RUSTFLAGS: -D warnings 17 | steps: 18 | - uses: actions/checkout@v4 19 | with: 20 | persist-credentials: false 21 | - name: Install Rust 22 | id: toolchain 23 | uses: dtolnay/rust-toolchain@stable 24 | with: 25 | components: rustfmt, clippy 26 | - name: Run Rustfmt 27 | run: cargo +${{steps.toolchain.outputs.name}} fmt --check 28 | - name: Run Clippy 29 | run: cargo +${{steps.toolchain.outputs.name}} clippy --all-features 30 | doc: 31 | name: Docs 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | with: 36 | persist-credentials: false 37 | - name: Install Rust 38 | id: toolchain 39 | uses: dtolnay/rust-toolchain@stable 40 | - name: Docs 41 | env: 42 | RUSTDOCFLAGS: -Dwarnings 43 | run: cargo 
+${{steps.toolchain.outputs.name}} doc --no-deps --all-features --document-private-items 44 | 45 | snowflake: 46 | runs-on: ubuntu-latest 47 | steps: 48 | - uses: actions/checkout@v4 49 | with: 50 | fetch-depth: 0 51 | - name: Martin's snowflake formatting rules 52 | uses: algesten/snowflake@v1.0.4 53 | with: 54 | check_diff: true 55 | 56 | build_versions: 57 | strategy: 58 | matrix: 59 | rust: [stable, beta, 1.71.1] 60 | runs-on: "ubuntu-latest" 61 | steps: 62 | - uses: actions/checkout@v4 63 | with: 64 | persist-credentials: false 65 | - uses: dtolnay/rust-toolchain@master 66 | id: toolchain 67 | with: 68 | toolchain: ${{ matrix.rust }} 69 | - uses: Swatinem/rust-cache@v2 70 | - name: Build 1 71 | run: cargo +${{steps.toolchain.outputs.name}} build 72 | - name: Build 2 73 | run: cargo +${{steps.toolchain.outputs.name}} build 74 | 75 | build_and_test: 76 | name: Test 77 | runs-on: ubuntu-latest 78 | strategy: 79 | matrix: 80 | feature: 81 | - "" 82 | - charset 83 | - cookies 84 | - socks-proxy 85 | - gzip 86 | - brotli 87 | - json 88 | - native-tls 89 | env: 90 | RUST_BACKTRACE: "1" 91 | RUSTFLAGS: "-D dead_code -D unused-variables -D unused" 92 | steps: 93 | - uses: actions/checkout@v4 94 | with: 95 | persist-credentials: false 96 | - name: Install Rust 97 | id: toolchain 98 | uses: dtolnay/rust-toolchain@stable 99 | - name: Test 100 | run: | 101 | cargo +${{steps.toolchain.outputs.name}} test \ 102 | --no-default-features --features "_test rustls ${{ matrix.feature }}" 103 | 104 | build_without_rustls: 105 | name: Test 106 | runs-on: ubuntu-latest 107 | strategy: 108 | matrix: 109 | feature: 110 | - "" 111 | - charset 112 | - cookies 113 | - socks-proxy 114 | - gzip 115 | - brotli 116 | - json 117 | - native-tls 118 | - rustls-no-provider 119 | env: 120 | RUST_BACKTRACE: "1" 121 | RUSTFLAGS: "-D dead_code -D unused-variables -D unused" 122 | steps: 123 | - uses: actions/checkout@v4 124 | with: 125 | persist-credentials: false 126 | - name: Install Rust 127 | id: 
toolchain 128 | uses: dtolnay/rust-toolchain@stable 129 | - name: Test 130 | run: | 131 | cargo +${{steps.toolchain.outputs.name}} test \ 132 | --no-default-features --features "_test ${{ matrix.feature }}" 133 | 134 | cargo-deny: 135 | name: cargo-deny 136 | 137 | # TODO: remove this matrix when https://github.com/EmbarkStudios/cargo-deny/issues/324 is resolved 138 | strategy: 139 | fail-fast: false 140 | matrix: 141 | platform: 142 | - aarch64-apple-ios 143 | - aarch64-linux-android 144 | - i686-pc-windows-gnu 145 | - i686-pc-windows-msvc 146 | - i686-unknown-linux-gnu 147 | - wasm32-unknown-unknown 148 | - x86_64-apple-darwin 149 | - x86_64-apple-ios 150 | - x86_64-pc-windows-gnu 151 | - x86_64-pc-windows-msvc 152 | - x86_64-unknown-linux-gnu 153 | - x86_64-unknown-redox 154 | 155 | runs-on: ubuntu-latest 156 | steps: 157 | - uses: actions/checkout@v4 158 | with: 159 | persist-credentials: false 160 | - uses: EmbarkStudios/cargo-deny-action@v2 161 | with: 162 | command: check 163 | log-level: error 164 | arguments: --all-features --target ${{ matrix.platform }} 165 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | rls 3 | .idea/ 4 | .vscode/ 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## License 2 | 3 | Copyright (c) 2019 Martin Algesten 4 | 5 | Licensed under either of 6 | 7 | * Apache License, Version 2.0 8 | ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 9 | * MIT license 10 | ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 11 | 12 | at your option. 
13 | 14 | ## Contribution 15 | 16 | Unless you explicitly state otherwise, any contribution intentionally submitted 17 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 18 | dual licensed under the Apache License, Version 2.0, and the MIT license, without 19 | any additional terms or conditions. See LICENSE-APACHE and LICENSE-MIT for 20 | details. 21 | 22 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ureq" 3 | version = "3.0.11" 4 | authors = ["Martin Algesten ", "Jacob Hoffman-Andrews "] 5 | description = "Simple, safe HTTP client" 6 | license = "MIT OR Apache-2.0" 7 | repository = "https://github.com/algesten/ureq" 8 | readme = "README.md" 9 | keywords = ["web", "request", "https", "http", "client"] 10 | categories = ["web-programming::http-client"] 11 | edition = "2018" 12 | exclude = ["/cargo_deny.sh", "/deny.toml", "/test.sh"] 13 | 14 | # MSRV 15 | rust-version = "1.71.1" 16 | 17 | [package.metadata.docs.rs] 18 | features = ["rustls", "platform-verifier", "native-tls", "socks-proxy", "cookies", "gzip", "brotli", "charset", "json", "_test", "_doc"] 19 | 20 | [features] 21 | default = ["rustls", "gzip"] 22 | 23 | ######## SUPPORTED FEATURES 24 | 25 | rustls = ["rustls-no-provider", "_ring"] 26 | native-tls = ["dep:native-tls", "dep:der", "_tls", "dep:webpki-root-certs"] 27 | platform-verifier = ["dep:rustls-platform-verifier"] 28 | socks-proxy = ["dep:socks"] 29 | cookies = ["dep:cookie_store", "_url"] 30 | gzip = ["dep:flate2"] 31 | brotli = ["dep:brotli-decompressor"] 32 | charset = ["dep:encoding_rs"] 33 | json = ["dep:serde", "dep:serde_json", "cookie_store?/serde_json"] 34 | 35 | ######## UNSTABLE FEATURES. 36 | # Might be removed or changed in a minor version. 37 | 38 | # Rustls CryptoProviders are not picked up from feature flags alone. 
They must be 39 | # configured on Agent. This feature flag makes it possible to compile ureq with 40 | # rustls, but without ring. 41 | rustls-no-provider = ["dep:rustls", "_tls", "dep:webpki-roots", "_rustls"] 42 | 43 | # Supported as long as native-tls supports this. 44 | vendored = ["native-tls?/vendored"] 45 | 46 | ######## INTERNAL FEATURES. DO NOT USE. 47 | 48 | # Ring has a higher chance of compiling cleanly without additional developer environment. 49 | # Supported as long as rustls supports this. 50 | _ring = ["rustls?/ring"] 51 | _url = ["dep:url"] 52 | _tls = ["dep:rustls-pemfile", "dep:rustls-pki-types"] 53 | _test = [] 54 | _rustls = [] 55 | _doc = ["rustls?/aws-lc-rs"] 56 | 57 | [dependencies] 58 | base64 = "0.22.1" 59 | ureq-proto = { version = "0.4.1", default-features = false, features = ["client"] } 60 | # ureq-proto = { path = "../ureq-proto", default-features = false, features = ["client"] } 61 | log = "0.4.25" 62 | utf-8 = "0.7.6" 63 | percent-encoding = "2.3.1" 64 | 65 | # These are used regardless of TLS implementation. 
66 | rustls-pemfile = { version = "2.1.2", optional = true, default-features = false, features = ["std"] } 67 | rustls-pki-types = { version = "1.11.0", optional = true, default-features = false, features = ["std"] } 68 | rustls-platform-verifier = { version = "0.5.1", optional = true, default-features = false } 69 | webpki-roots = { version = "0.26.8", optional = true, default-features = false } 70 | webpki-root-certs = { version = "0.26.8", optional = true, default-features = false } 71 | 72 | rustls = { version = "0.23.22", optional = true, default-features = false, features = ["logging", "std", "tls12"] } 73 | # held back on 0.2.12 to avoid double dependency on windows-sys (0.59.0, 0.52.0) 74 | native-tls = { version = "0.2.12", optional = true, default-features = false } 75 | der = { version = "0.7.9", optional = true, default-features = false, features = ["pem", "std"] } 76 | 77 | socks = { version = "0.3.4", optional = true } 78 | 79 | # cookie_store uses Url, while http-crate has its own Uri. 80 | # Keep url crate in lockstep with cookie_store. 81 | cookie_store = { version = "0.21.1", optional = true, default-features = false, features = ["preserve_order"] } 82 | # ureq-proto forces url=2.5.4. This optional dep documents the situation in cookie_store. 
83 | url = { version = "2.3.1", optional = true, default-features = false } 84 | 85 | flate2 = { version = "1.0.30", optional = true } 86 | brotli-decompressor = { version = "4.0.2", optional = true } 87 | encoding_rs = { version = "0.8.34", optional = true } 88 | 89 | serde = { version = "1.0.138", optional = true, default-features = false, features = ["std"] } 90 | serde_json = { version = "1.0.120", optional = true, default-features = false, features = ["std"] } 91 | 92 | [dev-dependencies] 93 | env_logger = "0.11.7" 94 | auto-args = "0.3.0" 95 | serde = { version = "1.0.204", features = ["std", "derive"] } 96 | assert_no_alloc = "1.1.2" 97 | # Enable aws-lc-rs for tests so we can demonstrate using ureq without compiling ring. 98 | rustls = { version = "0.23", features = ["aws-lc-rs"] } 99 | 100 | [[example]] 101 | name = "cureq" 102 | required-features = ["rustls", "native-tls", "socks-proxy", "cookies", "gzip", "brotli", "charset"] 103 | 104 | [[example]] 105 | name = "mpsc-transport" 106 | required-features = ["rustls"] 107 | 108 | [[example]] 109 | name = "proxy" 110 | required-features = ["rustls"] 111 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Martin Algesten 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MIGRATE-2-to-3.md: -------------------------------------------------------------------------------- 1 | # Changes ureq 2.x -> 3.x 2 | 3 | This is not an exhaustive list of changes. Most tweaks to the API are clear by looking 4 | at the docs. If anything is unclear, please open a PR and we can clarify further. 5 | 6 | ## Rewrite 7 | 8 | ureq 3.x is a ground-up complete rewrite of ureq 2.x. The HTTP protocol is re-engineered 9 | to a Sans-IO style implementation living in the `ureq-proto` crate. Both protocol and ureq 10 | main crate remain `#![forbid(unsafe_code)]`. 11 | 12 | The goals of the project remain largely the same: A simple, sync, HTTP/1.1 client with 13 | a minimum number of dependencies. 14 | 15 | With Sans-IO the user can now implement their own `Transport` thus providing alternative 16 | TLS or non-socket based communication in crates maintained outside the ureq project. The 17 | same goes for `Resolver`. 18 | 19 | ## HTTP Crate 20 | 21 | In 2.x ureq implemented its own `Request` and `Response` structs. In 3.x, we 22 | drop our own impl in favor of the [http crate]. The http crate presents a unified HTTP 23 | API and can be found as a dependency of a number of big [http-related crates] in the 24 | Rust ecosystem. The idea is that presenting a well-known API towards users of ureq 25 | will make it easier to use. 26 | 27 | ## Re-exported crates must be semver 1.x (stable) 28 | 29 | ureq 2.x re-exported a number of semver 0.x crates and thus suffered from the fact that breaking 30 | changes in those crates technically were breaking changes in ureq (and thus ought to increase 31 | major version.
In ureq 3.x we will strive to re-export as few crates as possible. 32 | 33 | * No re-exported tls config 34 | * No re-exported cookie crates 35 | * No re-exported json macro 36 | 37 | Instead we made our own TLS config and Cookie API, and drop the json macro. 38 | 39 | ## No retry idempotent 40 | 41 | ureq 2.x did an automatic retry of idempotent methods (GET, HEAD). This was considered 42 | confusing, so 3.x has no built-in retries. 43 | 44 | ## No send body charset 45 | 46 | For now, ureq 3.x can't change the charset of a send body. It can however still do that 47 | for the response body. 48 | 49 | [http crate]: https://crates.io/crates/http 50 | [http-related crates]: https://crates.io/crates/http/reverse_dependencies 51 | 52 | ## Features 53 | 54 | - `proxy-from-env` is the default now. CONNECT-proxy needs no extra feature flag, but `socks-proxy` does. 55 | - `native-certs` is built-in. In the `TlsConfig`, which you can set on agent or request level, you have three choices 56 | via the [`RootCerts`](https://docs.rs/ureq/3.0.6/ureq/tls/enum.RootCerts.html#variant.PlatformVerifier) enum. 57 | `Specific` when you want to set the root certs yourself, `PlatformVerifier`, which for rustls delegates to the system, 58 | and for native-tls means using the root certs native-tls is picking up (this is what you want), and finally `WebPki`, 59 | which uses the root certs bundled with ureq. -------------------------------------------------------------------------------- /README.tpl: -------------------------------------------------------------------------------- 1 | [comment]: # (README.md is autogenerated from src/lib.rs by `cargo readme > README.md`) 2 | 3 | # {{crate}} 4 | 5 | {{readme}} 6 | -------------------------------------------------------------------------------- /RELEASE.txt: -------------------------------------------------------------------------------- 1 | Releasing ureq 2 | ============== 3 | 4 | 1. UPDATE CHANGELOG 5 | 6 | Ensure the changelog is updated. 
Use git history to highlight the main 7 | changes, especially API changes. Smaller things can be omitted. 8 | 9 | Make a PR for changelog if there is time. 10 | 11 | 12 | 13 | 2. CHECK OUTDATED DEPS 14 | 15 | Quickly scan whether we can bump some dep. Use `cargo install 16 | cargo-outdated` as a helper to find them. 17 | 18 | cargo update 19 | cargo outdated --depth=1 20 | 21 | The initial update is just to ensure your checkout is using the latest 22 | deps allowed by Cargo.toml already. 23 | 24 | Outdated deps don't _have_ to make the release, use your judgement. 25 | 26 | Make a PR for outdated deps if there is time. 27 | 28 | 29 | 30 | 3. UPDATE Cargo.toml VERSION 31 | 32 | We follow semver. Bug fixes bump patch version, API changes bump minor 33 | version. Cargo bump is a helper to update the version in 34 | Cargo.toml. `cargo install cargo-bump` 35 | 36 | cargo bump patch 37 | 38 | Git commit Cargo.toml and push straight into master. 39 | 40 | 41 | 42 | 4. GIT TAG 43 | 44 | Each release has a corresponding git tag. For version `1.2.3` there would 45 | be a `git tag 1.2.3`. The tag should point to the bump commit pushed in 3. 46 | 47 | Do the tag and git push --tags. 48 | 49 | 50 | 51 | 5. WAIT FOR CI 52 | 53 | Both the push to master and following git tag will cause GitHub CI to 54 | run. Wait for both runs to complete to ensure we have a "good 55 | version". 56 | 57 | 58 | 59 | 6. PUBLISH 60 | 61 | Publish the release to crates.io. 62 | 63 | cargo publish 64 | -------------------------------------------------------------------------------- /cargo_deny.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # https://github.com/EmbarkStudios/cargo-deny 4 | # 5 | # cargo-deny checks our dependency tree for copy-left licenses, 6 | # duplicate dependencies, and rustsec advisories (https://rustsec.org/advisories).
7 | # 8 | # Install: `cargo install cargo-deny` 9 | # 10 | # This script checks the dependency tree for all targets. 11 | # cargo-deny is configured in `deny.toml`. 12 | 13 | set -eu 14 | script_path=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) 15 | cd "$script_path" 16 | set -x 17 | 18 | # cargo install cargo-deny 19 | cargo deny --all-features --log-level error --target aarch64-apple-darwin check 20 | cargo deny --all-features --log-level error --target i686-pc-windows-gnu check 21 | cargo deny --all-features --log-level error --target i686-pc-windows-msvc check 22 | cargo deny --all-features --log-level error --target i686-unknown-linux-gnu check 23 | cargo deny --all-features --log-level error --target wasm32-unknown-unknown check 24 | cargo deny --all-features --log-level error --target x86_64-apple-darwin check 25 | cargo deny --all-features --log-level error --target x86_64-pc-windows-gnu check 26 | cargo deny --all-features --log-level error --target x86_64-pc-windows-msvc check 27 | cargo deny --all-features --log-level error --target x86_64-unknown-linux-gnu check 28 | cargo deny --all-features --log-level error --target x86_64-unknown-linux-musl check 29 | cargo deny --all-features --log-level error --target x86_64-unknown-redox check 30 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | # https://github.com/EmbarkStudios/cargo-deny 2 | # 3 | # cargo-deny checks our dependency tree for copy-left licenses, 4 | # duplicate dependencies, and rustsec advisories (https://rustsec.org/advisories). 5 | # 6 | # Install: `cargo install cargo-deny` 7 | # Check: `cargo deny check` or run `cargo_deny.sh`.
8 | 9 | # Note: running just `cargo deny check` without a `--target` can result in 10 | # false positives due to https://github.com/EmbarkStudios/cargo-deny/issues/324 11 | targets = [ 12 | { triple = "aarch64-apple-darwin" }, 13 | { triple = "i686-pc-windows-gnu" }, 14 | { triple = "i686-pc-windows-msvc" }, 15 | { triple = "i686-unknown-linux-gnu" }, 16 | { triple = "wasm32-unknown-unknown" }, 17 | { triple = "x86_64-apple-darwin" }, 18 | { triple = "x86_64-pc-windows-gnu" }, 19 | { triple = "x86_64-pc-windows-msvc" }, 20 | { triple = "x86_64-unknown-linux-gnu" }, 21 | { triple = "x86_64-unknown-linux-musl" }, 22 | { triple = "x86_64-unknown-redox" }, 23 | ] 24 | 25 | # 2025-03-13: We get double windows-sys crates in our build deps due to aws-lc-rs/bindgen 26 | # The dep all other crates use is 0.59.0. Ideally we don't want to ignore this, but 27 | # for now it appears to be the only solution. 28 | # 29 | # = windows-sys v0.52.0 30 | # ├── errno v0.3.10 31 | # │ └── rustix v0.38.44 32 | # │ └── which v4.4.2 33 | # │ └── bindgen v0.69.5 34 | # │ └── (build) aws-lc-sys v0.27.0 35 | # 36 | 37 | # 2025-03-17: security-framework differs between native-tls and rustls-platform-verifier. 38 | # Hopefully only very few people end up using both rustls and native-tls at 39 | # the same time. 
40 | # 41 | # = security-framework v2.11.1 42 | # └── native-tls v0.2.14 43 | # └── ureq v3.0.9 44 | # = security-framework v3.2.0 45 | # └── rustls-platform-verifier v0.5.1 46 | # └── ureq v3.0.9 47 | # 48 | 49 | exclude = ["windows-sys", "security-framework"] 50 | 51 | [advisories] 52 | yanked = "deny" 53 | ignore = [] 54 | 55 | [bans] 56 | multiple-versions = "deny" 57 | wildcards = "allow" # at least until https://github.com/EmbarkStudios/cargo-deny/issues/241 is fixed 58 | deny = [] 59 | 60 | skip = [ 61 | { name = "bitflags" }, # Unfortunate duplicate dependency due to old version being pulled in by `security-framework` 62 | ] 63 | skip-tree = [] 64 | 65 | 66 | [licenses] 67 | private = { ignore = true } 68 | confidence-threshold = 0.92 # We want really high confidence when inferring licenses from text 69 | allow = [ 70 | "Apache-2.0 WITH LLVM-exception", # https://spdx.org/licenses/LLVM-exception.html 71 | "Apache-2.0", # https://tldrlegal.com/license/apache-license-2.0-(apache-2.0) 72 | "BSD-2-Clause", # https://tldrlegal.com/license/bsd-2-clause-license-(freebsd) 73 | "BSD-3-Clause", # https://tldrlegal.com/license/bsd-3-clause-license-(revised) 74 | "BSL-1.0", # https://tldrlegal.com/license/boost-software-license-1.0-explained 75 | "CC0-1.0", # https://creativecommons.org/publicdomain/zero/1.0/ 76 | "ISC", # https://tldrlegal.com/license/-isc-license 77 | "LicenseRef-UFL-1.0", # https://tldrlegal.com/license/ubuntu-font-license,-1.0 - no official SPDX, see https://github.com/emilk/egui/issues/2321 78 | "MIT-0", # https://choosealicense.com/licenses/mit-0/ 79 | "MIT", # https://tldrlegal.com/license/mit-license 80 | "MPL-2.0", # https://www.mozilla.org/en-US/MPL/2.0/FAQ/ - see Q11. Used by webpki-roots on Linux.
81 | "OFL-1.1", # https://spdx.org/licenses/OFL-1.1.html 82 | "OpenSSL", # https://www.openssl.org/source/license.html - used on Linux 83 | "Unicode-DFS-2016", # https://spdx.org/licenses/Unicode-DFS-2016.html 84 | "Unicode-3.0", # https://www.unicode.org/license.txt 85 | "Zlib", # https://tldrlegal.com/license/zlib-libpng-license-(zlib) 86 | ] 87 | 88 | [[licenses.clarify]] 89 | name = "webpki" 90 | expression = "ISC" 91 | license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] 92 | 93 | [[licenses.clarify]] 94 | name = "ring" 95 | expression = "MIT AND ISC AND OpenSSL" 96 | license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] 97 | -------------------------------------------------------------------------------- /examples/cureq.rs: -------------------------------------------------------------------------------- 1 | use std::io::{BufRead, BufReader}; 2 | use std::process; 3 | use std::time::Duration; 4 | 5 | use auto_args::AutoArgs; 6 | use ureq::tls::TlsConfig; 7 | use ureq::Agent; 8 | 9 | #[derive(Debug, AutoArgs)] 10 | struct Opt { 11 | /// Print headers 12 | include: Option, 13 | 14 | /// Timeout for entire request (seconds) 15 | max_time: Option, 16 | 17 | /// Disable certificate verification 18 | insecure: Option, 19 | 20 | /// URL to request 21 | url: String, 22 | } 23 | 24 | fn main() { 25 | env_logger::init(); 26 | let opt = Opt::from_args(); 27 | if let Err(e) = run(&opt) { 28 | eprintln!("{} - {}", e, opt.url); 29 | process::exit(1); 30 | } 31 | } 32 | 33 | fn run(opt: &Opt) -> Result<(), ureq::Error> { 34 | let agent: Agent = Agent::config_builder() 35 | .timeout_global(opt.max_time.map(|t| Duration::from_secs(t.into()))) 36 | .tls_config( 37 | TlsConfig::builder() 38 | .disable_verification(opt.insecure.unwrap_or(false)) 39 | .build(), 40 | ) 41 | .build() 42 | .into(); 43 | 44 | let mut res = agent.get(&opt.url).call()?; 45 | 46 | if opt.include.unwrap_or(false) { 47 | eprintln!("{:#?}", res.headers()); 48 | } 49 | 50 | const MAX_BODY_SIZE: u64 
= 5 * 1024 * 1024; 51 | 52 | let reader = BufReader::new(res.body_mut().with_config().limit(MAX_BODY_SIZE).reader()); 53 | let lines = reader.lines(); 54 | 55 | for r in lines { 56 | let line = match r { 57 | Ok(v) => v, 58 | Err(e) => return Err(e.into()), 59 | }; 60 | println!("{}", line); 61 | } 62 | 63 | Ok(()) 64 | } 65 | -------------------------------------------------------------------------------- /examples/mpsc-transport.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{mpsc, Arc, Mutex}; 2 | use std::thread; 3 | use std::time::Duration; 4 | 5 | use ureq::config::Config; 6 | use ureq::unversioned::resolver::DefaultResolver; 7 | use ureq::unversioned::transport::{Buffers, ConnectionDetails, Connector, LazyBuffers}; 8 | use ureq::unversioned::transport::{NextTimeout, RustlsConnector, Transport}; 9 | use ureq::{Agent, Error}; 10 | 11 | pub fn main() { 12 | // To see some inner workings of ureq, this example can be interesting to run with: 13 | // RUST_LOG=trace cargo run --example mpsc-transport 14 | env_logger::init(); 15 | 16 | // Our very own connector. 17 | let connector = MpscConnector::default(); 18 | 19 | // Spawn a fake server in a thread. We take the server_side TxRx to be able to 20 | // communicate with the client. 21 | let server_side = connector.server_side.clone(); 22 | thread::spawn(|| run_server(server_side)); 23 | 24 | let chain = connector 25 | // For educational purposes, we add the RustlsConnector to the chain. 26 | // This would mean a URL that starts with https:// will be wrapped in TLS 27 | // letting our MpscConnector handle the "underlying" transport 28 | // with TLS on top. 29 | // 30 | // This example does not use an https URL since that would require us 31 | // to terminate the TLS in the server side, which is more involved than 32 | // this example is trying to show. 33 | .chain(RustlsConnector::default()); 34 | 35 | // Use default config and resolver.
36 | let config = Config::default(); 37 | let resolver = DefaultResolver::default(); 38 | 39 | // Construct an agent from the parts 40 | let agent = Agent::with_parts(config, chain, resolver); 41 | 42 | // This request will use the MpscTransport to communicate with the fake server. 43 | // If you change this to "https", the RustlsConnector will be used, but the 44 | // fake server is not made to handle TLS. 45 | let mut res = agent.get("http://example.com").call().unwrap(); 46 | 47 | println!( 48 | "CLIENT got response: {:?}", 49 | res.body_mut().read_to_string().unwrap() 50 | ); 51 | } 52 | 53 | #[derive(Debug, Default)] 54 | pub struct MpscConnector { 55 | server_side: Arc>>, 56 | } 57 | 58 | impl Connector for MpscConnector { 59 | type Out = MpscTransport; 60 | 61 | fn connect( 62 | &self, 63 | details: &ConnectionDetails, 64 | _: Option, 65 | ) -> Result, Error> { 66 | println!( 67 | "Making an mpsc connection to {:?} (with addrs: {:?})", 68 | details.uri, 69 | // The default resolver does resolve this to some IP addresses. 70 | &details.addrs[..] 71 | ); 72 | 73 | let (txrx1, txrx2) = TxRx::pair(); 74 | 75 | let transport = MpscTransport::new(txrx1, 1024, 1024); 76 | 77 | // This is how we pass the server_side TxRx to the server thread. 78 | // A more realistic example would not do this. 79 | { 80 | let mut server_side = self.server_side.lock().unwrap(); 81 | *server_side = Some(txrx2); 82 | } 83 | 84 | Ok(Some(transport)) 85 | } 86 | } 87 | 88 | /// A pair of channels for transmitting and receiving data. 89 | /// 90 | /// These will be connected to another such pair. 91 | #[derive(Debug)] 92 | pub struct TxRx { 93 | tx: mpsc::SyncSender>, 94 | // The Mutex here is unfortunate for this example since we are not using rx in 95 | // a "Sync way", but we also don't want to make an unsafe impl Sync to risk 96 | // having the repo flagged as unsafe by overzealous compliance tools.
97 | rx: Mutex>>, 98 | alive: bool, 99 | } 100 | 101 | impl TxRx { 102 | pub fn pair() -> (TxRx, TxRx) { 103 | let (tx1, rx1) = mpsc::sync_channel(10); 104 | let (tx2, rx2) = mpsc::sync_channel(10); 105 | (TxRx::new(tx1, rx2), TxRx::new(tx2, rx1)) 106 | } 107 | 108 | fn new(tx: mpsc::SyncSender>, rx: mpsc::Receiver>) -> Self { 109 | Self { 110 | tx, 111 | rx: Mutex::new(rx), 112 | alive: true, 113 | } 114 | } 115 | 116 | pub fn send(&mut self, data: Vec) { 117 | if let Err(e) = self.tx.send(data) { 118 | println!("Failed to send data: {}", e); 119 | self.alive = false; 120 | } 121 | } 122 | 123 | pub fn recv(&mut self) -> Option> { 124 | let rx = self.rx.lock().unwrap(); 125 | match rx.recv() { 126 | Ok(data) => Some(data), 127 | Err(e) => { 128 | println!("Failed to receive data: {}", e); 129 | self.alive = false; 130 | None 131 | } 132 | } 133 | } 134 | 135 | pub fn is_alive(&self) -> bool { 136 | self.alive 137 | } 138 | } 139 | 140 | /// A transport over TxRx channel. 141 | #[derive(Debug)] 142 | pub struct MpscTransport { 143 | buffers: LazyBuffers, 144 | txrx: TxRx, 145 | } 146 | 147 | impl MpscTransport { 148 | pub fn new(txrx: TxRx, input_buffer_size: usize, output_buffer_size: usize) -> Self { 149 | Self { 150 | buffers: LazyBuffers::new(input_buffer_size, output_buffer_size), 151 | txrx, 152 | } 153 | } 154 | } 155 | 156 | impl Transport for MpscTransport { 157 | fn buffers(&mut self) -> &mut dyn Buffers { 158 | &mut self.buffers 159 | } 160 | 161 | fn transmit_output(&mut self, amount: usize, _timeout: NextTimeout) -> Result<(), Error> { 162 | // The data to send. Must use the amount to know how much of the buffer 163 | // is relevant. 164 | let to_send = &self.buffers.output()[..amount]; 165 | 166 | // Blocking send until the other side receives it. 
167 | self.txrx.send(to_send.to_vec()); 168 | 169 | Ok(()) 170 | } 171 | 172 | fn await_input(&mut self, _timeout: NextTimeout) -> Result { 173 | let Some(data) = self.txrx.recv() else { 174 | return Ok(false); 175 | }; 176 | 177 | // Append the data to the input buffer. 178 | let input = self.buffers.input_append_buf(); 179 | let len = data.len(); 180 | input[..len].copy_from_slice(data.as_slice()); 181 | 182 | // Report how many bytes appended to the input buffer. 183 | self.buffers.input_appended(len); 184 | 185 | // Return true if we made progress, i.e. if we managed to fill the input buffer with any bytes. 186 | Ok(len > 0) 187 | } 188 | 189 | fn is_open(&mut self) -> bool { 190 | self.txrx.is_alive() 191 | } 192 | } 193 | 194 | // A fake HTTP server that responds with "Hello, world!" 195 | fn run_server(server_side: Arc>>) { 196 | // Wait until the server side is present. 197 | let txrx = loop { 198 | // Scope to not hold lock while sleeping 199 | let txrx = { 200 | let mut lock = server_side.lock().unwrap(); 201 | lock.take() 202 | }; 203 | 204 | if let Some(txrx) = txrx { 205 | break txrx; 206 | } 207 | 208 | thread::sleep(Duration::from_millis(100)); 209 | }; 210 | 211 | // No contention on this lock. See above why we even need it. 212 | let rx = txrx.rx.lock().unwrap(); 213 | 214 | let mut incoming = String::new(); 215 | 216 | // We are not guaranteed to receive the entire request in one go. 217 | // Loop until we know we have it. 218 | loop { 219 | let data = rx.recv().unwrap(); 220 | 221 | let s = String::from_utf8_lossy(&data); 222 | incoming.push_str(&s); 223 | 224 | if incoming.contains("\r\n\r\n") { 225 | break; 226 | } 227 | } 228 | 229 | println!("SERVER received request: {:?}", incoming); 230 | 231 | // A random response.
232 | let response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; 233 | 234 | println!("SERVER sending response: {:?}", response); 235 | 236 | txrx.tx.send(response.as_bytes().to_vec()).unwrap(); 237 | } 238 | -------------------------------------------------------------------------------- /examples/proxy.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use ureq::tls::TlsConfig; 4 | use ureq::{config::Config, Agent, Proxy}; 5 | 6 | // Use this example with something like mitmproxy 7 | // $ mitmproxy --listen-port 8080 8 | 9 | fn main() -> Result<(), Box> { 10 | let proxy = Proxy::new("http://localhost:8080")?; 11 | 12 | let config = Config::builder() 13 | .tls_config( 14 | TlsConfig::builder() 15 | // The mitmproxy uses a certificate authority we 16 | // don't know. Do not disable verification in 17 | // production use. 18 | .disable_verification(true) 19 | .build(), 20 | ) 21 | .proxy(Some(proxy)) 22 | .build(); 23 | let agent = Agent::new_with_config(config); 24 | 25 | let _ = agent.get("https://example.com").call()?; 26 | 27 | Ok(()) 28 | } 29 | -------------------------------------------------------------------------------- /src/body/brotli.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use brotli_decompressor::Decompressor; 4 | 5 | use crate::error::is_wrapped_ureq_error; 6 | use crate::Error; 7 | 8 | pub(crate) struct BrotliDecoder(Decompressor); 9 | 10 | impl BrotliDecoder { 11 | pub fn new(reader: R) -> Self { 12 | BrotliDecoder(Decompressor::new(reader, 4096)) 13 | } 14 | } 15 | 16 | impl io::Read for BrotliDecoder { 17 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 18 | self.0.read(buf).map_err(|e| { 19 | if is_wrapped_ureq_error(&e) { 20 | // If this already is a ureq::Error, like Timeout, pass it along. 
21 | e 22 | } else { 23 | Error::Decompress("brotli", e).into_io() 24 | } 25 | }) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/body/build.rs: -------------------------------------------------------------------------------- 1 | use std::io::{self, Cursor}; 2 | use std::sync::Arc; 3 | 4 | use ureq_proto::BodyMode; 5 | 6 | use super::{Body, BodyDataSource, ContentEncoding, ResponseInfo}; 7 | 8 | /// Builder for creating a response body. 9 | /// 10 | /// This is useful for testing, or for [`Middleware`][crate::middleware::Middleware] that 11 | /// returns another body than the requested one. 12 | /// 13 | /// # Example 14 | /// 15 | /// ``` 16 | /// use ureq::Body; 17 | /// use ureq::http::Response; 18 | /// 19 | /// let body = Body::builder() 20 | /// .mime_type("text/plain") 21 | /// .charset("utf-8") 22 | /// .data("Hello world!"); 23 | /// 24 | /// let mut response = Response::builder() 25 | /// .status(200) 26 | /// .header("content-type", "text/plain; charset=utf-8") 27 | /// .body(body)?; 28 | /// 29 | /// let text = response 30 | /// .body_mut() 31 | /// .read_to_string()?; 32 | /// 33 | /// assert_eq!(text, "Hello world!"); 34 | /// # Ok::<_, ureq::Error>(()) 35 | /// ``` 36 | pub struct BodyBuilder { 37 | info: ResponseInfo, 38 | limit: Option, 39 | } 40 | 41 | impl BodyBuilder { 42 | pub(crate) fn new() -> Self { 43 | BodyBuilder { 44 | info: ResponseInfo { 45 | content_encoding: ContentEncoding::None, 46 | mime_type: None, 47 | charset: None, 48 | body_mode: BodyMode::NoBody, 49 | }, 50 | limit: None, 51 | } 52 | } 53 | 54 | /// Set the mime type of the body. 55 | /// 56 | /// **This does not set any HTTP headers. 
Affects Body decoding.** 57 | /// 58 | /// ``` 59 | /// use ureq::Body; 60 | /// 61 | /// let body = Body::builder() 62 | /// .mime_type("text/plain") 63 | /// .data("Hello world!"); 64 | /// ``` 65 | pub fn mime_type(mut self, mime_type: impl Into) -> Self { 66 | self.info.mime_type = Some(mime_type.into()); 67 | self 68 | } 69 | 70 | /// Set the charset of the body 71 | /// 72 | /// **This does not set any HTTP headers. Affects Body decoding.** 73 | /// 74 | /// ``` 75 | /// use ureq::Body; 76 | /// 77 | /// let body = Body::builder() 78 | /// .mime_type("text/plain") 79 | /// .charset("utf-8") 80 | /// .data("Hello world!"); 81 | /// ``` 82 | pub fn charset(mut self, charset: impl Into) -> Self { 83 | self.info.charset = Some(charset.into()); 84 | self 85 | } 86 | 87 | /// Limit how much data is to be released from the body. 88 | /// 89 | /// **This does not set any HTTP headers. Affects Body decoding.** 90 | /// 91 | /// ``` 92 | /// use ureq::Body; 93 | /// 94 | /// let body = Body::builder() 95 | /// .mime_type("text/plain") 96 | /// .charset("utf-8") 97 | /// .limit(5) 98 | /// // This will be limited to "Hello" 99 | /// .data("Hello world!"); 100 | /// ``` 101 | pub fn limit(mut self, l: u64) -> Self { 102 | self.limit = Some(l); 103 | self 104 | } 105 | 106 | /// Creates the body data turned into bytes. 107 | /// 108 | /// Will automatically limit the body reader to the length of the data. 109 | pub fn data(mut self, data: impl Into>) -> Body { 110 | let data: Vec = data.into(); 111 | 112 | let len = self.limit.unwrap_or(data.len() as u64); 113 | self.info.body_mode = BodyMode::LengthDelimited(len); 114 | 115 | self.reader(Cursor::new(data)) 116 | } 117 | 118 | /// Creates a body from a streaming reader. 119 | /// 120 | /// The reader can be limited by using `.limit()` or when the reader 121 | /// reaches the end.
122 | pub fn reader(self, data: impl io::Read + Send + Sync + 'static) -> Body { 123 | Body { 124 | source: BodyDataSource::Reader(Box::new(data)), 125 | info: Arc::new(self.info), 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/body/charset.rs: -------------------------------------------------------------------------------- 1 | use encoding_rs::{Decoder, Encoder, Encoding}; 2 | use std::fmt; 3 | use std::io::{self, BufRead, BufReader}; 4 | 5 | use crate::util::ConsumeBuf; 6 | 7 | const MAX_OUTPUT: usize = 4096; 8 | 9 | /// Charset transcoder 10 | pub(crate) struct CharCodec { 11 | reader: BufReader, 12 | dec: Option, 13 | enc: Option, 14 | buf: ConsumeBuf, 15 | reached_end: bool, 16 | } 17 | 18 | impl CharCodec 19 | where 20 | R: io::Read, 21 | { 22 | pub fn new(reader: R, from: &'static Encoding, to: &'static Encoding) -> Self { 23 | CharCodec { 24 | reader: BufReader::new(reader), 25 | dec: Some(from.new_decoder()), 26 | enc: if to == encoding_rs::UTF_8 { 27 | None 28 | } else { 29 | Some(to.new_encoder()) 30 | }, 31 | buf: ConsumeBuf::new(MAX_OUTPUT), 32 | reached_end: false, 33 | } 34 | } 35 | } 36 | 37 | impl io::Read for CharCodec { 38 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 39 | if self.reached_end && self.buf.unconsumed().is_empty() { 40 | return Ok(0); 41 | } 42 | 43 | let input = 'read: { 44 | if self.buf.unconsumed().len() > MAX_OUTPUT / 4 { 45 | // Do not keep filling if we have unused output. 46 | break 'read self.reader.buffer(); 47 | } 48 | 49 | let tmp = self.reader.fill_buf()?; 50 | let tmp_len = tmp.len(); 51 | if tmp_len >= 4 { 52 | // We need some minimum input to make progress. 53 | break 'read tmp; 54 | } 55 | 56 | let tmp2 = self.reader.fill_buf()?; 57 | if tmp2.len() == tmp_len { 58 | // Made no progress. That means we reached the end. 
59 | self.reached_end = true; 60 | } 61 | 62 | tmp2 63 | }; 64 | 65 | if self.buf.free_mut().len() < 4 { 66 | self.buf.add_space(1024); 67 | } 68 | let output = self.buf.free_mut(); 69 | 70 | if let Some(dec) = &mut self.dec { 71 | let (_, input_used, output_used, _had_errors) = 72 | dec.decode_to_utf8(input, output, self.reached_end); 73 | 74 | self.reader.consume(input_used); 75 | self.buf.add_filled(output_used); 76 | 77 | if self.reached_end { 78 | // Can't be used again 79 | self.dec = None; 80 | } 81 | } 82 | 83 | // guaranteed to be on a char boundary by encoding_rs 84 | let bytes = self.buf.unconsumed(); 85 | 86 | let amount = if let Some(enc) = &mut self.enc { 87 | // unwrap is ok because it is on a char boundary, and non-utf8 chars have been replaced 88 | let utf8 = std::str::from_utf8(bytes).unwrap(); 89 | let (_, input_used, output_used, _) = enc.encode_from_utf8(utf8, buf, self.reached_end); 90 | self.buf.consume(input_used); 91 | 92 | if self.reached_end { 93 | // Can't be used again 94 | self.enc = None; 95 | } 96 | 97 | output_used 98 | } else { 99 | // No encoder, we want utf8 100 | let max = bytes.len().min(buf.len()); 101 | buf[..max].copy_from_slice(&bytes[..max]); 102 | self.buf.consume(max); 103 | max 104 | }; 105 | 106 | Ok(amount) 107 | } 108 | } 109 | 110 | impl fmt::Debug for CharCodec { 111 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 112 | write!( 113 | f, 114 | "CharCodec {{ from: {}, to: {} }}", 115 | self.dec 116 | .as_ref() 117 | .map(|d| d.encoding().name()) 118 | .unwrap_or(encoding_rs::UTF_8.name()), 119 | self.enc 120 | .as_ref() 121 | .map(|e| e.encoding()) 122 | .unwrap_or(encoding_rs::UTF_8) 123 | .name() 124 | ) 125 | } 126 | } 127 | 128 | #[cfg(all(test, feature = "_test"))] 129 | mod test { 130 | use super::*; 131 | 132 | #[test] 133 | fn create_encodings() { 134 | assert!(Encoding::for_label(b"utf8").is_some()); 135 | assert_eq!(Encoding::for_label(b"utf8"), Encoding::for_label(b"utf-8")); 136 | } 137 | 138 | 
#[test] 139 | #[cfg(feature = "charset")] 140 | fn non_ascii_reason() { 141 | use crate::test::init_test_log; 142 | use crate::Agent; 143 | 144 | init_test_log(); 145 | let agent: Agent = Agent::config_builder().max_redirects(0).build().into(); 146 | 147 | let res = agent 148 | .get("https://my.test/non-ascii-reason") 149 | .call() 150 | .unwrap(); 151 | assert_eq!(res.status(), 302); 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/body/gzip.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use flate2::read::MultiGzDecoder; 4 | 5 | use crate::error::is_wrapped_ureq_error; 6 | use crate::Error; 7 | 8 | pub(crate) struct GzipDecoder(MultiGzDecoder); 9 | 10 | impl GzipDecoder { 11 | pub fn new(reader: R) -> Self { 12 | GzipDecoder(MultiGzDecoder::new(reader)) 13 | } 14 | } 15 | 16 | impl io::Read for GzipDecoder { 17 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 18 | self.0.read(buf).map_err(|e| { 19 | if is_wrapped_ureq_error(&e) { 20 | // If this already is a ureq::Error, like Timeout, pass it along. 21 | e 22 | } else { 23 | Error::Decompress("gzip", e).into_io() 24 | } 25 | }) 26 | } 27 | } 28 | 29 | #[cfg(all(test, feature = "_test"))] 30 | mod test { 31 | use crate::test::init_test_log; 32 | use crate::transport::set_handler; 33 | use crate::Agent; 34 | 35 | // Test that a stream gets returned to the pool if it is gzip encoded and the gzip 36 | // decoder reads the exact amount from a chunked stream, not past the 0. This 37 | // happens because gzip has built-in knowledge of the length to read. 
38 | #[test] 39 | fn gz_internal_length() { 40 | init_test_log(); 41 | 42 | let gz_body = vec![ 43 | b'E', b'\r', b'\n', // 14 first chunk 44 | 0x1F, 0x8B, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x03, 0xCB, 0x48, 0xCD, 0xC9, 45 | b'\r', b'\n', // 46 | b'E', b'\r', b'\n', // 14 second chunk 47 | 0xC9, 0x57, 0x28, 0xCF, 0x2F, 0xCA, 0x49, 0x51, 0xC8, 0x18, 0xBC, 0x6C, 0x00, 0xA5, 48 | b'\r', b'\n', // 49 | b'7', b'\r', b'\n', // 7 third chunk 50 | 0x5C, 0x7C, 0xEF, 0xA7, 0x00, 0x00, 0x00, // 51 | b'\r', b'\n', // 52 | // end 53 | b'0', b'\r', b'\n', // 54 | b'\r', b'\n', // 55 | ]; 56 | 57 | let agent = Agent::new_with_defaults(); 58 | assert_eq!(agent.pool_count(), 0); 59 | 60 | set_handler( 61 | "/gz_body", 62 | 200, 63 | &[ 64 | ("transfer-encoding", "chunked"), 65 | ("content-encoding", "gzip"), 66 | ], 67 | &gz_body, 68 | ); 69 | 70 | let mut res = agent.get("https://example.test/gz_body").call().unwrap(); 71 | res.body_mut().read_to_string().unwrap(); 72 | 73 | assert_eq!(agent.pool_count(), 1); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/body/limit.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use crate::Error; 4 | 5 | pub(crate) struct LimitReader { 6 | reader: R, 7 | limit: u64, 8 | left: u64, 9 | } 10 | 11 | impl LimitReader { 12 | pub fn new(reader: R, limit: u64) -> Self { 13 | LimitReader { 14 | reader, 15 | limit, 16 | left: limit, 17 | } 18 | } 19 | } 20 | 21 | impl io::Read for LimitReader { 22 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 23 | if self.left == 0 { 24 | return Err(Error::BodyExceedsLimit(self.limit).into_io()); 25 | } 26 | 27 | // The max buffer size is usize, which may be 32 bit. 
28 | let max = (self.left.min(usize::MAX as u64) as usize).min(buf.len()); 29 | 30 | let n = self.reader.read(&mut buf[..max])?; 31 | 32 | self.left -= n as u64; 33 | 34 | Ok(n) 35 | } 36 | } 37 | 38 | #[cfg(all(test, feature = "_test"))] 39 | mod test { 40 | use std::io; 41 | 42 | use crate::test::init_test_log; 43 | use crate::transport::set_handler; 44 | use crate::Error; 45 | 46 | #[test] 47 | fn short_read() { 48 | init_test_log(); 49 | set_handler("/get", 200, &[("content-length", "10")], b"hello"); 50 | let mut res = crate::get("https://my.test/get").call().unwrap(); 51 | let err = res.body_mut().read_to_string().unwrap_err(); 52 | let ioe = err.into_io(); 53 | assert_eq!(ioe.kind(), io::ErrorKind::UnexpectedEof); 54 | } 55 | 56 | #[test] 57 | fn limit_below_size() { 58 | init_test_log(); 59 | set_handler("/get", 200, &[("content-length", "5")], b"hello"); 60 | let mut res = crate::get("https://my.test/get").call().unwrap(); 61 | let err = res 62 | .body_mut() 63 | .with_config() 64 | .limit(3) 65 | .read_to_string() 66 | .unwrap_err(); 67 | println!("{:?}", err); 68 | assert!(matches!(err, Error::BodyExceedsLimit(3))); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/body/lossy.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use utf8::DecodeError; 4 | 5 | use crate::util::ConsumeBuf; 6 | 7 | const REPLACEMENT_CHAR: u8 = b'?'; 8 | const MIN_BUF: usize = 8; 9 | 10 | pub struct LossyUtf8Reader { 11 | reader: R, 12 | ended: bool, 13 | input: ConsumeBuf, 14 | valid_len: usize, 15 | } 16 | impl LossyUtf8Reader { 17 | pub(crate) fn new(reader: R) -> Self { 18 | Self { 19 | reader, 20 | ended: false, 21 | input: ConsumeBuf::new(8), 22 | valid_len: 0, 23 | } 24 | } 25 | 26 | fn process_input(&mut self) -> usize { 27 | match utf8::decode(self.input.unconsumed()) { 28 | Ok(_) => { 29 | // Entire input is valid 30 | self.input.unconsumed().len() 31 | } 
32 | Err(e) => match e { 33 | DecodeError::Invalid { 34 | valid_prefix, 35 | invalid_sequence, 36 | .. 37 | } => { 38 | let valid_len = valid_prefix.len(); 39 | let invalid_len = invalid_sequence.len(); 40 | 41 | // Switch out the problem input chars 42 | let replace_in = self.input.unconsumed_mut(); 43 | for i in 0..invalid_len { 44 | replace_in[valid_len + i] = REPLACEMENT_CHAR; 45 | } 46 | 47 | valid_len + invalid_len 48 | } 49 | DecodeError::Incomplete { valid_prefix, .. } => { 50 | let valid_len = valid_prefix.len(); 51 | 52 | if self.ended { 53 | // blank the rest 54 | let replace_in = self.input.unconsumed_mut(); 55 | let invalid_len = replace_in.len() - valid_len; 56 | for i in 0..invalid_len { 57 | replace_in[valid_len + i] = REPLACEMENT_CHAR; 58 | } 59 | valid_len + invalid_len 60 | } else { 61 | valid_len 62 | } 63 | } 64 | }, 65 | } 66 | } 67 | } 68 | 69 | impl io::Read for LossyUtf8Reader { 70 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 71 | // Match the input buffer size 72 | if !self.ended { 73 | let total_len = self.input.unconsumed().len() + self.input.free_mut().len(); 74 | let wanted_len = buf.len().max(MIN_BUF); 75 | if wanted_len < total_len { 76 | self.input.add_space(total_len - wanted_len); 77 | } 78 | } 79 | 80 | // Fill up to a point where we definitely will make progress. 
81 | while !self.ended && self.input.unconsumed().len() < MIN_BUF { 82 | let amount = self.reader.read(self.input.free_mut())?; 83 | self.input.add_filled(amount); 84 | 85 | if amount == 0 { 86 | self.ended = true; 87 | } 88 | } 89 | 90 | if self.ended && self.input.unconsumed().is_empty() { 91 | return Ok(0); 92 | } 93 | 94 | if self.valid_len == 0 { 95 | self.valid_len = self.process_input(); 96 | assert!(self.valid_len > 0); 97 | } 98 | 99 | let src = &self.input.unconsumed()[..self.valid_len]; 100 | let max = src.len().min(buf.len()); 101 | buf[..max].copy_from_slice(&src[..max]); 102 | self.input.consume(max); 103 | 104 | self.valid_len -= max; 105 | 106 | Ok(max) 107 | } 108 | } 109 | 110 | #[cfg(test)] 111 | mod test { 112 | use std::io::Read; 113 | 114 | use super::*; 115 | 116 | fn do_reader<'a>(bytes: &'a mut [&'a [u8]]) -> String { 117 | let mut r = LossyUtf8Reader::new(TestReader(bytes)); 118 | let mut buf = String::new(); 119 | r.read_to_string(&mut buf).unwrap(); 120 | buf 121 | } 122 | 123 | #[test] 124 | fn ascii() { 125 | assert_eq!(do_reader(&mut [b"abc123"]), "abc123"); 126 | } 127 | 128 | #[test] 129 | fn utf8_one_read() { 130 | assert_eq!(do_reader(&mut ["åiåaäeö".as_bytes()]), "åiåaäeö"); 131 | } 132 | 133 | #[test] 134 | fn utf8_chopped_single_char() { 135 | assert_eq!(do_reader(&mut [&[195], &[165]]), "å"); 136 | } 137 | 138 | #[test] 139 | fn utf8_chopped_prefix_ascii() { 140 | assert_eq!(do_reader(&mut [&[97, 97, 97, 195], &[165]]), "aaaå"); 141 | } 142 | 143 | #[test] 144 | fn utf8_chopped_suffix_ascii() { 145 | assert_eq!(do_reader(&mut [&[195], &[165, 97, 97, 97]]), "åaaa"); 146 | } 147 | 148 | #[test] 149 | fn utf8_broken_single() { 150 | assert_eq!(do_reader(&mut [&[195]]), "?"); 151 | } 152 | 153 | #[test] 154 | fn utf8_broken_suffix_ascii() { 155 | assert_eq!(do_reader(&mut [&[195, 97, 97, 97]]), "?aaa"); 156 | } 157 | 158 | #[test] 159 | fn utf8_broken_prefix_ascii() { 160 | assert_eq!(do_reader(&mut [&[97, 97, 97, 195]]), "aaa?"); 
161 | } 162 | 163 | #[test] 164 | fn hiragana() { 165 | assert_eq!(do_reader(&mut ["あいうえお".as_bytes()]), "あいうえお"); 166 | } 167 | 168 | #[test] 169 | fn emoji() { 170 | assert_eq!(do_reader(&mut ["✅✅✅".as_bytes()]), "✅✅✅"); 171 | } 172 | 173 | #[test] 174 | fn leftover() { 175 | let s = "あ"; 176 | assert_eq!(s.as_bytes(), &[227, 129, 130]); 177 | 178 | let mut buf = [0; 2]; 179 | let mut r = LossyUtf8Reader::new(s.as_bytes()); 180 | 181 | assert_eq!(r.read(&mut buf).unwrap(), 2); 182 | assert_eq!(&buf[..], &[227, 129]); 183 | 184 | assert_eq!(r.read(&mut buf).unwrap(), 1); 185 | assert_eq!(&buf[..1], &[130]); 186 | 187 | assert_eq!(r.read(&mut buf).unwrap(), 0); 188 | } 189 | 190 | struct TestReader<'a>(&'a mut [&'a [u8]]); 191 | 192 | impl<'a> io::Read for TestReader<'a> { 193 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 194 | if self.0.iter().all(|c| c.is_empty()) { 195 | return Ok(0); 196 | } 197 | 198 | let pos = self.0.iter().position(|c| !c.is_empty()).unwrap(); 199 | let cur = &self.0[pos]; 200 | 201 | let max = cur.len().min(buf.len()); 202 | buf[..max].copy_from_slice(&cur[..max]); 203 | 204 | self.0[pos] = &cur[max..]; 205 | 206 | Ok(max) 207 | } 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/cookies.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::fmt; 3 | use std::iter; 4 | use std::sync::{Mutex, MutexGuard}; 5 | 6 | use cookie_store::CookieStore; 7 | use http::Uri; 8 | 9 | use crate::http; 10 | use crate::util::UriExt; 11 | use crate::Error; 12 | 13 | #[cfg(feature = "json")] 14 | use std::io; 15 | 16 | #[derive(Debug)] 17 | pub(crate) struct SharedCookieJar { 18 | inner: Mutex, 19 | } 20 | 21 | /// Collection of cookies. 22 | /// 23 | /// The jar is accessed using [`Agent::cookie_jar_lock`][crate::Agent::cookie_jar_lock]. 24 | /// It can be saved and loaded. 
25 | pub struct CookieJar<'a>(MutexGuard<'a, CookieStore>); 26 | 27 | /// Representation of an HTTP cookie. 28 | /// 29 | /// Conforms to [IETF RFC6265](https://datatracker.ietf.org/doc/html/rfc6265) 30 | /// 31 | /// ## Constructing a `Cookie` 32 | /// 33 | /// To construct a cookie it must be parsed and bound to a uri: 34 | /// 35 | /// ``` 36 | /// use ureq::Cookie; 37 | /// use ureq::http::Uri; 38 | /// 39 | /// let uri = Uri::from_static("https://my.server.com"); 40 | /// let cookie = Cookie::parse("name=value", &uri)?; 41 | /// assert_eq!(cookie.to_string(), "name=value"); 42 | /// # Ok::<_, ureq::Error>(()) 43 | /// ``` 44 | pub struct Cookie<'a>(CookieInner<'a>); 45 | 46 | #[allow(clippy::large_enum_variant)] 47 | enum CookieInner<'a> { 48 | Borrowed(&'a cookie_store::Cookie<'a>), 49 | Owned(cookie_store::Cookie<'a>), 50 | } 51 | 52 | impl<'a> CookieInner<'a> { 53 | fn into_static(self) -> cookie_store::Cookie<'static> { 54 | match self { 55 | CookieInner::Borrowed(v) => v.clone().into_owned(), 56 | CookieInner::Owned(v) => v.into_owned(), 57 | } 58 | } 59 | } 60 | 61 | impl<'a> Cookie<'a> { 62 | /// Parses a new [`Cookie`] from a string 63 | pub fn parse(cookie_str: S, uri: &Uri) -> Result, Error> 64 | where 65 | S: Into>, 66 | { 67 | let cookie = cookie_store::Cookie::parse(cookie_str, &uri.try_into_url()?)?; 68 | Ok(Cookie(CookieInner::Owned(cookie))) 69 | } 70 | 71 | /// The cookie's name. 72 | pub fn name(&self) -> &str { 73 | match &self.0 { 74 | CookieInner::Borrowed(v) => v.name(), 75 | CookieInner::Owned(v) => v.name(), 76 | } 77 | } 78 | 79 | /// The cookie's value. 
80 | pub fn value(&self) -> &str { 81 | match &self.0 { 82 | CookieInner::Borrowed(v) => v.value(), 83 | CookieInner::Owned(v) => v.value(), 84 | } 85 | } 86 | 87 | #[cfg(test)] 88 | fn as_cookie_store(&self) -> &cookie_store::Cookie<'a> { 89 | match &self.0 { 90 | CookieInner::Borrowed(v) => v, 91 | CookieInner::Owned(v) => v, 92 | } 93 | } 94 | } 95 | 96 | impl Cookie<'static> { 97 | fn into_owned(self) -> cookie_store::Cookie<'static> { 98 | match self.0 { 99 | CookieInner::Owned(v) => v, 100 | _ => unreachable!(), 101 | } 102 | } 103 | } 104 | 105 | impl<'a> CookieJar<'a> { 106 | /// Returns a reference to the __unexpired__ `Cookie` corresponding to the specified `domain`, 107 | /// `path`, and `name`. 108 | pub fn get(&self, domain: &str, path: &str, name: &str) -> Option> { 109 | self.0 110 | .get(domain, path, name) 111 | .map(|c| Cookie(CookieInner::Borrowed(c))) 112 | } 113 | 114 | /// Removes a `Cookie` from the jar, returning the `Cookie` if it was in the jar 115 | pub fn remove(&mut self, domain: &str, path: &str, name: &str) -> Option> { 116 | self.0 117 | .remove(domain, path, name) 118 | .map(|c| Cookie(CookieInner::Owned(c))) 119 | } 120 | 121 | /// Inserts `cookie`, received from `uri`, into the jar, following the rules of the 122 | /// [IETF RFC6265 Storage Model](https://datatracker.ietf.org/doc/html/rfc6265#section-5.3). 
123 | pub fn insert(&mut self, cookie: Cookie<'static>, uri: &Uri) -> Result<(), Error> { 124 | let url = uri.try_into_url()?; 125 | self.0.insert(cookie.into_owned(), &url)?; 126 | Ok(()) 127 | } 128 | 129 | /// Clear the contents of the jar 130 | pub fn clear(&mut self) { 131 | self.0.clear() 132 | } 133 | 134 | /// An iterator visiting all the __unexpired__ cookies in the jar 135 | pub fn iter(&self) -> impl Iterator> { 136 | self.0 137 | .iter_unexpired() 138 | .map(|c| Cookie(CookieInner::Borrowed(c))) 139 | } 140 | 141 | /// Serialize any __unexpired__ and __persistent__ cookies in the jar to JSON format and 142 | /// write them to `writer` 143 | #[cfg(feature = "json")] 144 | pub fn save_json(&self, writer: &mut W) -> Result<(), Error> { 145 | Ok(cookie_store::serde::json::save(&self.0, writer)?) 146 | } 147 | 148 | /// Load JSON-formatted cookies from `reader`, skipping any __expired__ cookies 149 | /// 150 | /// Replaces all the contents of the current cookie jar. 151 | #[cfg(feature = "json")] 152 | pub fn load_json(&mut self, reader: R) -> Result<(), Error> { 153 | let store = cookie_store::serde::json::load(reader)?; 154 | *self.0 = store; 155 | Ok(()) 156 | } 157 | 158 | pub(crate) fn store_response_cookies<'b>( 159 | &mut self, 160 | iter: impl Iterator>, 161 | uri: &Uri, 162 | ) { 163 | let url = uri.try_into_url().expect("uri to be a url"); 164 | let raw_cookies = iter.map(|c| c.0.into_static().into()); 165 | self.0.store_response_cookies(raw_cookies, &url); 166 | } 167 | 168 | /// Release the cookie jar. 169 | pub fn release(self) {} 170 | } 171 | 172 | // CookieStore::new() changes parameters depending on feature flag "public_suffix". 173 | // That means if a user enables public_suffix for CookieStore through diamond dependency, 174 | // we start having compilation errors un ureq. 175 | // 176 | // This workaround instantiates a CookieStore in a way that does not change with flags. 
/// Instantiate a `CookieStore` in a way that is stable regardless of the
/// "public_suffix" feature flag (see comment above).
fn instantiate_cookie_store() -> CookieStore {
    // The explicit turbofish pins the generic parameters so this call compiles
    // identically whether or not a diamond dependency enables public_suffix.
    let i = iter::empty::<Result<cookie_store::Cookie<'static>, &str>>();
    CookieStore::from_cookies(i, true).unwrap()
}

impl SharedCookieJar {
    /// Create a new, empty shared jar.
    pub(crate) fn new() -> Self {
        SharedCookieJar {
            inner: Mutex::new(instantiate_cookie_store()),
        }
    }

    /// Lock the jar for exclusive access.
    pub(crate) fn lock(&self) -> CookieJar<'_> {
        let lock = self.inner.lock().unwrap();
        CookieJar(lock)
    }

    /// Build the value of a `Cookie:` request header for `uri`.
    ///
    /// Returns an empty string when no cookies match, or when `uri` cannot be
    /// converted to a url.
    pub(crate) fn get_request_cookies(&self, uri: &Uri) -> String {
        let mut cookies = String::new();

        let url = match uri.try_into_url() {
            Ok(v) => v,
            Err(e) => {
                debug!("Bad url for cookie: {:?}", e);
                return cookies;
            }
        };

        let store = self.inner.lock().unwrap();

        for c in store.matches(&url) {
            if !is_cookie_rfc_compliant(c) {
                debug!("Do not send non compliant cookie: {:?}", c.name());
                continue;
            }

            if !cookies.is_empty() {
                // BUG FIX: RFC 6265 section 5.4 requires cookie-pairs in the
                // Cookie header to be separated by "; " (semicolon + space),
                // not a bare ';'.
                cookies.push_str("; ");
            }

            cookies.push_str(&c.to_string());
        }

        cookies
    }
}

/// Check that a cookie's name and value only use characters allowed by
/// RFC 6265, so we never emit a malformed `Cookie:` header.
fn is_cookie_rfc_compliant(cookie: &cookie_store::Cookie) -> bool {
    // https://tools.ietf.org/html/rfc6265#page-9
    // set-cookie-header = "Set-Cookie:" SP set-cookie-string
    // set-cookie-string = cookie-pair *( ";" SP cookie-av )
    // cookie-pair       = cookie-name "=" cookie-value
    // cookie-name       = token
    // cookie-value      = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
    // cookie-octet      = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
    //                     ; US-ASCII characters excluding CTLs,
    //                     ; whitespace DQUOTE, comma, semicolon,
    //                     ; and backslash
    // token             = <token, defined in RFC 2616, Section 2.2>

    // https://tools.ietf.org/html/rfc2616#page-17
    // CHAR       = <any US-ASCII character (octets 0 - 127)>
    // ...
    // CTL        = <any US-ASCII control character
    //              (octets 0 - 31) and DEL (127)>
    // ...
    // token      = 1*<any CHAR except CTLs or separators>
    // separators = "(" | ")" | "<" | ">" | "@"
    //            | "," | ";" | ":" | "\" | <">
    //            | "/" | "[" | "]" | "?" | "="
    //            | "{" | "}" | SP | HT

    fn is_valid_name(b: &u8) -> bool {
        is_tchar(b)
    }

    fn is_valid_value(b: &u8) -> bool {
        b.is_ascii()
            && !b.is_ascii_control()
            && !b.is_ascii_whitespace()
            && *b != b'"'
            && *b != b','
            && *b != b';'
            && *b != b'\\'
    }

    let name = cookie.name().as_bytes();

    let valid_name = name.iter().all(is_valid_name);

    if !valid_name {
        log::trace!("cookie name is not valid: {:?}", cookie.name());
        return false;
    }

    let value = cookie.value().as_bytes();

    // A quoted cookie-value is allowed; strip one matching pair of double
    // quotes before validating the octets inside.
    let valid_value = value
        .strip_prefix(br#"""#)
        .and_then(|value| value.strip_suffix(br#"""#))
        .unwrap_or(value)
        .iter()
        .all(is_valid_value);

    if !valid_value {
        // NB. Do not log cookie value since it might be secret
        log::trace!("cookie value is not valid: {:?}", cookie.name());
        return false;
    }

    true
}

/// Is `b` an HTTP "token" character (tchar)?
#[inline]
pub(crate) fn is_tchar(b: &u8) -> bool {
    match b {
        b'!' | b'#' | b'$' | b'%' | b'&' => true,
        b'\'' | b'*' | b'+' | b'-' | b'.'
=> true, 295 | b'^' | b'_' | b'`' | b'|' | b'~' => true, 296 | b if b.is_ascii_alphanumeric() => true, 297 | _ => false, 298 | } 299 | } 300 | 301 | impl fmt::Display for Cookie<'_> { 302 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 303 | match &self.0 { 304 | CookieInner::Borrowed(v) => v.fmt(f), 305 | CookieInner::Owned(v) => v.fmt(f), 306 | } 307 | } 308 | } 309 | 310 | #[cfg(test)] 311 | mod test { 312 | 313 | use std::convert::TryFrom; 314 | 315 | use super::*; 316 | 317 | fn uri() -> Uri { 318 | Uri::try_from("https://example.test").unwrap() 319 | } 320 | 321 | #[test] 322 | fn illegal_cookie_name() { 323 | let cookie = Cookie::parse("borked/=value", &uri()).unwrap(); 324 | assert!(!is_cookie_rfc_compliant(cookie.as_cookie_store())); 325 | } 326 | 327 | #[test] 328 | fn illegal_cookie_value() { 329 | let cookie = Cookie::parse("name=borked,", &uri()).unwrap(); 330 | assert!(!is_cookie_rfc_compliant(cookie.as_cookie_store())); 331 | let cookie = Cookie::parse("name=\"borked", &uri()).unwrap(); 332 | assert!(!is_cookie_rfc_compliant(cookie.as_cookie_store())); 333 | let cookie = Cookie::parse("name=borked\"", &uri()).unwrap(); 334 | assert!(!is_cookie_rfc_compliant(cookie.as_cookie_store())); 335 | let cookie = Cookie::parse("name=\"\"borked\"", &uri()).unwrap(); 336 | assert!(!is_cookie_rfc_compliant(cookie.as_cookie_store())); 337 | } 338 | 339 | #[test] 340 | fn legal_cookie_name_value() { 341 | let cookie = Cookie::parse("name=value", &uri()).unwrap(); 342 | assert!(is_cookie_rfc_compliant(cookie.as_cookie_store())); 343 | let cookie = Cookie::parse("name=\"value\"", &uri()).unwrap(); 344 | assert!(is_cookie_rfc_compliant(cookie.as_cookie_store())); 345 | } 346 | } 347 | -------------------------------------------------------------------------------- /src/middleware.rs: -------------------------------------------------------------------------------- 1 | //! Chained interception to modify the request or response. 
2 | 3 | use std::fmt; 4 | use std::sync::Arc; 5 | 6 | use crate::http; 7 | use crate::run::run; 8 | use crate::{Agent, Body, Error, SendBody}; 9 | 10 | /// Chained processing of request (and response). 11 | /// 12 | /// # Middleware as `fn` 13 | /// 14 | /// The middleware trait is implemented for all functions that have the signature 15 | /// 16 | /// `Fn(Request, MiddlewareNext) -> Result` 17 | /// 18 | /// That means the easiest way to implement middleware is by providing a `fn`, like so 19 | /// 20 | /// ``` 21 | /// use ureq::{Body, SendBody}; 22 | /// use ureq::middleware::MiddlewareNext; 23 | /// use ureq::http::{Request, Response}; 24 | /// 25 | /// fn my_middleware(req: Request, next: MiddlewareNext) 26 | /// -> Result, ureq::Error> { 27 | /// 28 | /// // do middleware things to request 29 | /// 30 | /// // continue the middleware chain 31 | /// let res = next.handle(req)?; 32 | /// 33 | /// // do middleware things to response 34 | /// 35 | /// Ok(res) 36 | /// } 37 | /// ``` 38 | /// 39 | /// # Adding headers 40 | /// 41 | /// A common use case is to add headers to the outgoing request. Here an example of how. 
42 | /// 43 | /// ```no_run 44 | /// use ureq::{Body, SendBody, Agent, config::Config}; 45 | /// use ureq::middleware::MiddlewareNext; 46 | /// use ureq::http::{Request, Response, header::HeaderValue}; 47 | /// 48 | /// # #[cfg(feature = "json")] 49 | /// # { 50 | /// fn my_middleware(mut req: Request, next: MiddlewareNext) 51 | /// -> Result, ureq::Error> { 52 | /// 53 | /// req.headers_mut().insert("X-My-Header", HeaderValue::from_static("value_42")); 54 | /// 55 | /// // set my bespoke header and continue the chain 56 | /// next.handle(req) 57 | /// } 58 | /// 59 | /// let mut config = Config::builder() 60 | /// .middleware(my_middleware) 61 | /// .build(); 62 | /// 63 | /// let agent: Agent = config.into(); 64 | /// 65 | /// let result: serde_json::Value = 66 | /// agent.get("http://httpbin.org/headers").call()?.body_mut().read_json()?; 67 | /// 68 | /// assert_eq!(&result["headers"]["X-My-Header"], "value_42"); 69 | /// # } Ok::<_, ureq::Error>(()) 70 | /// ``` 71 | /// 72 | /// # State 73 | /// 74 | /// To maintain state between middleware invocations, we need to do something more elaborate than 75 | /// the simple `fn` and implement the `Middleware` trait directly. 76 | /// 77 | /// ## Example with mutex lock 78 | /// 79 | /// In the `examples` directory there is an additional example `count-bytes.rs` which uses 80 | /// a mutex lock like shown below. 81 | /// 82 | /// ``` 83 | /// use std::sync::{Arc, Mutex}; 84 | /// 85 | /// use ureq::{Body, SendBody}; 86 | /// use ureq::middleware::{Middleware, MiddlewareNext}; 87 | /// use ureq::http::{Request, Response}; 88 | /// 89 | /// struct MyState { 90 | /// // whatever is needed 91 | /// } 92 | /// 93 | /// struct MyMiddleware(Arc>); 94 | /// 95 | /// impl Middleware for MyMiddleware { 96 | /// fn handle(&self, request: Request, next: MiddlewareNext) 97 | /// -> Result, ureq::Error> { 98 | /// 99 | /// // These extra brackets ensures we release the Mutex lock before continuing the 100 | /// // chain. 
There could also be scenarios where we want to maintain the lock through 101 | /// // the invocation, which would block other requests from proceeding concurrently 102 | /// // through the middleware. 103 | /// { 104 | /// let mut state = self.0.lock().unwrap(); 105 | /// // do stuff with state 106 | /// } 107 | /// 108 | /// // continue middleware chain 109 | /// next.handle(request) 110 | /// } 111 | /// } 112 | /// ``` 113 | /// 114 | /// ## Example with atomic 115 | /// 116 | /// This example shows how we can increase a counter for each request going 117 | /// through the agent. 118 | /// 119 | /// ``` 120 | /// use ureq::{Body, SendBody, Agent, config::Config}; 121 | /// use ureq::middleware::{Middleware, MiddlewareNext}; 122 | /// use ureq::http::{Request, Response}; 123 | /// use std::sync::atomic::{AtomicU64, Ordering}; 124 | /// use std::sync::Arc; 125 | /// 126 | /// // Middleware that stores a counter state. This example uses an AtomicU64 127 | /// // since the middleware is potentially shared by multiple threads running 128 | /// // requests at the same time. 129 | /// struct MyCounter(Arc); 130 | /// 131 | /// impl Middleware for MyCounter { 132 | /// fn handle(&self, req: Request, next: MiddlewareNext) 133 | /// -> Result, ureq::Error> { 134 | /// 135 | /// // increase the counter for each invocation 136 | /// self.0.fetch_add(1, Ordering::Relaxed); 137 | /// 138 | /// // continue the middleware chain 139 | /// next.handle(req) 140 | /// } 141 | /// } 142 | /// 143 | /// let shared_counter = Arc::new(AtomicU64::new(0)); 144 | /// 145 | /// let mut config = Config::builder() 146 | /// .middleware(MyCounter(shared_counter.clone())) 147 | /// .build(); 148 | /// 149 | /// let agent: Agent = config.into(); 150 | /// 151 | /// agent.get("http://httpbin.org/get").call()?; 152 | /// agent.get("http://httpbin.org/get").call()?; 153 | /// 154 | /// // Check we did indeed increase the counter twice. 
155 | /// assert_eq!(shared_counter.load(Ordering::Relaxed), 2); 156 | /// 157 | /// # Ok::<_, ureq::Error>(()) 158 | /// ``` 159 | pub trait Middleware: Send + Sync + 'static { 160 | /// Handle of the middleware logic. 161 | fn handle( 162 | &self, 163 | request: http::Request, 164 | next: MiddlewareNext, 165 | ) -> Result, Error>; 166 | } 167 | 168 | #[derive(Clone, Default)] 169 | pub(crate) struct MiddlewareChain { 170 | chain: Arc>>, 171 | } 172 | 173 | impl MiddlewareChain { 174 | pub(crate) fn add(&mut self, mw: impl Middleware) { 175 | let Some(chain) = Arc::get_mut(&mut self.chain) else { 176 | panic!("Can't add to a MiddlewareChain that is already cloned") 177 | }; 178 | 179 | chain.push(Box::new(mw)); 180 | } 181 | } 182 | 183 | /// Continuation of a [`Middleware`] chain. 184 | pub struct MiddlewareNext<'a> { 185 | agent: &'a Agent, 186 | index: usize, 187 | } 188 | 189 | impl<'a> MiddlewareNext<'a> { 190 | pub(crate) fn new(agent: &'a Agent) -> Self { 191 | MiddlewareNext { agent, index: 0 } 192 | } 193 | 194 | /// Continue the middleware chain. 195 | /// 196 | /// The middleware must call this in order to run the request. Not calling 197 | /// it is a valid choice for not wanting the request to execute. 198 | pub fn handle( 199 | mut self, 200 | request: http::Request, 201 | ) -> Result, Error> { 202 | if let Some(mw) = self.agent.config().middleware.chain.get(self.index) { 203 | // This middleware exists, run it. 204 | self.index += 1; 205 | mw.handle(request, self) 206 | } else { 207 | // When chain is over, call the main run(). 
208 | let (parts, body) = request.into_parts(); 209 | let request = http::Request::from_parts(parts, ()); 210 | run(self.agent, request, body) 211 | } 212 | } 213 | } 214 | 215 | impl Middleware for F 216 | where 217 | F: Fn(http::Request, MiddlewareNext) -> Result, Error> 218 | + Send 219 | + Sync 220 | + 'static, 221 | { 222 | fn handle( 223 | &self, 224 | request: http::Request, 225 | next: MiddlewareNext, 226 | ) -> Result, Error> { 227 | (self)(request, next) 228 | } 229 | } 230 | 231 | impl fmt::Debug for MiddlewareChain { 232 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 233 | f.debug_struct("MiddlewareChain") 234 | .field("len", &self.chain.len()) 235 | .finish() 236 | } 237 | } 238 | -------------------------------------------------------------------------------- /src/pool.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | use std::fmt; 3 | use std::sync::{Arc, Mutex, Weak}; 4 | 5 | use http::uri::{Authority, Scheme}; 6 | use http::Uri; 7 | 8 | use crate::config::Config; 9 | use crate::http; 10 | use crate::proxy::Proxy; 11 | use crate::transport::time::{Duration, Instant}; 12 | use crate::transport::{Buffers, ConnectionDetails, Connector, NextTimeout, Transport}; 13 | use crate::util::DebugAuthority; 14 | use crate::Error; 15 | 16 | pub(crate) struct ConnectionPool { 17 | connector: Box>>, 18 | pool: Arc>, 19 | } 20 | 21 | impl ConnectionPool { 22 | pub fn new(connector: Box>>, config: &Config) -> Self { 23 | ConnectionPool { 24 | connector, 25 | pool: Arc::new(Mutex::new(Pool::new(config))), 26 | } 27 | } 28 | 29 | pub fn connect( 30 | &self, 31 | details: &ConnectionDetails, 32 | max_idle_age: Duration, 33 | ) -> Result { 34 | let key = details.into(); 35 | 36 | { 37 | let mut pool = self.pool.lock().unwrap(); 38 | pool.purge(details.now); 39 | 40 | if let Some(conn) = pool.get(&key, max_idle_age, details.now) { 41 | debug!("Use pooled: {:?}", key); 42 | return 
Ok(conn); 43 | } 44 | } 45 | 46 | let transport = self.run_connector(details)?; 47 | 48 | let conn = Connection { 49 | transport, 50 | key, 51 | last_use: details.now, 52 | pool: Arc::downgrade(&self.pool), 53 | position_per_host: None, 54 | }; 55 | 56 | Ok(conn) 57 | } 58 | 59 | pub fn run_connector(&self, details: &ConnectionDetails) -> Result, Error> { 60 | let transport = self 61 | .connector 62 | .connect(details, None)? 63 | .ok_or(Error::ConnectionFailed)?; 64 | 65 | Ok(transport) 66 | } 67 | 68 | #[cfg(test)] 69 | /// Exposed for testing the pool count. 70 | pub fn pool_count(&self) -> usize { 71 | let lock = self.pool.lock().unwrap(); 72 | lock.lru.len() 73 | } 74 | } 75 | 76 | pub(crate) struct Connection { 77 | transport: Box, 78 | key: PoolKey, 79 | last_use: Instant, 80 | pool: Weak>, 81 | 82 | /// Used to prune max_idle_connections_by_host. 83 | /// 84 | /// # Example 85 | /// 86 | /// If we have a max idle per hosts set to 3, and we have the following LRU: 87 | /// 88 | /// ```text 89 | /// [B, A, A, B, A, B, A] 90 | /// ``` 91 | /// 92 | /// This field is used to enumerate the elements per host reverse: 93 | /// 94 | /// ```text 95 | /// [B2, A3, A2, B1, A1, B0, A0] 96 | /// ``` 97 | /// 98 | /// Once we have that enumeration, we can drop elements from the front where there 99 | /// position_per_host >= idle_per_host. 
100 | position_per_host: Option, 101 | } 102 | 103 | impl Connection { 104 | pub fn buffers(&mut self) -> &mut dyn Buffers { 105 | self.transport.buffers() 106 | } 107 | 108 | pub fn transmit_output(&mut self, amount: usize, timeout: NextTimeout) -> Result<(), Error> { 109 | self.transport.transmit_output(amount, timeout) 110 | } 111 | 112 | pub fn maybe_await_input(&mut self, timeout: NextTimeout) -> Result { 113 | self.transport.maybe_await_input(timeout) 114 | } 115 | 116 | pub fn consume_input(&mut self, amount: usize) { 117 | self.transport.buffers().input_consume(amount) 118 | } 119 | 120 | pub fn close(self) { 121 | debug!("Close: {:?}", self.key); 122 | // Just consume self. 123 | } 124 | 125 | pub fn reuse(mut self, now: Instant) { 126 | if !self.transport.is_open() { 127 | // The purpose of probing is that is_open() for tcp connector attempts 128 | // to read some more bytes. If that succeeds, the connection is considered 129 | // _NOT_ open, since that means we either failed to read the previous 130 | // body to end, or the server sent bogus data after the body. Either 131 | // is a condition where we mustn't reuse the connection. 
132 | return; 133 | } 134 | self.last_use = now; 135 | 136 | let Some(arc) = self.pool.upgrade() else { 137 | debug!("Pool gone: {:?}", self.key); 138 | return; 139 | }; 140 | 141 | debug!("Return to pool: {:?}", self.key); 142 | 143 | let mut pool = arc.lock().unwrap(); 144 | 145 | pool.add(self); 146 | pool.purge(now); 147 | } 148 | 149 | pub fn is_tls(&self) -> bool { 150 | self.transport.is_tls() 151 | } 152 | 153 | fn age(&self, now: Instant) -> Duration { 154 | now.duration_since(now) 155 | } 156 | 157 | fn is_open(&mut self) -> bool { 158 | self.transport.is_open() 159 | } 160 | } 161 | 162 | /// The pool key is the Scheme, Authority from the uri and the Proxy setting 163 | /// 164 | /// 165 | /// ```notrust 166 | /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 167 | /// |-| |-------------------------------||--------| |-------------------| |-----| 168 | /// | | | | | 169 | /// scheme authority path query fragment 170 | /// ``` 171 | /// 172 | /// It's correct to include username/password since connections with differing such and 173 | /// the same host/port must not be mixed up. 
174 | /// 175 | #[derive(Clone, PartialEq, Eq)] 176 | struct PoolKey(Arc); 177 | 178 | impl PoolKey { 179 | fn new(uri: &Uri, proxy: Option<&Proxy>) -> Self { 180 | let inner = PoolKeyInner( 181 | uri.scheme().expect("uri with scheme").clone(), 182 | uri.authority().expect("uri with authority").clone(), 183 | proxy.cloned(), 184 | ); 185 | 186 | PoolKey(Arc::new(inner)) 187 | } 188 | } 189 | 190 | #[derive(PartialEq, Eq)] 191 | struct PoolKeyInner(Scheme, Authority, Option); 192 | 193 | #[derive(Debug)] 194 | struct Pool { 195 | lru: VecDeque, 196 | max_idle_connections: usize, 197 | max_idle_connections_per_host: usize, 198 | max_idle_age: Duration, 199 | } 200 | 201 | impl Pool { 202 | fn new(config: &Config) -> Self { 203 | Pool { 204 | lru: VecDeque::new(), 205 | max_idle_connections: config.max_idle_connections(), 206 | max_idle_connections_per_host: config.max_idle_connections_per_host(), 207 | max_idle_age: config.max_idle_age().into(), 208 | } 209 | } 210 | 211 | fn purge(&mut self, now: Instant) { 212 | while self.lru.len() > self.max_idle_connections || self.front_is_too_old(now) { 213 | self.lru.pop_front(); 214 | } 215 | 216 | self.update_position_per_host(); 217 | 218 | let max = self.max_idle_connections_per_host; 219 | 220 | // unwrap is ok because update_position_per_host() should have set all 221 | self.lru.retain(|c| c.position_per_host.unwrap() < max); 222 | } 223 | 224 | fn front_is_too_old(&self, now: Instant) -> bool { 225 | self.lru.front().map(|c| c.age(now)) > Some(self.max_idle_age) 226 | } 227 | 228 | fn update_position_per_host(&mut self) { 229 | // Reset position counters 230 | for c in &mut self.lru { 231 | c.position_per_host = None; 232 | } 233 | 234 | loop { 235 | let maybe_uncounted = self 236 | .lru 237 | .iter() 238 | .rev() 239 | .find(|c| c.position_per_host.is_none()); 240 | 241 | let Some(uncounted) = maybe_uncounted else { 242 | break; // nothing more to count. 
243 | }; 244 | 245 | let key_to_count = uncounted.key.clone(); 246 | 247 | for (position, c) in self 248 | .lru 249 | .iter_mut() 250 | .rev() 251 | .filter(|c| c.key == key_to_count) 252 | .enumerate() 253 | { 254 | c.position_per_host = Some(position); 255 | } 256 | } 257 | } 258 | 259 | fn add(&mut self, conn: Connection) { 260 | self.lru.push_back(conn) 261 | } 262 | 263 | fn get(&mut self, key: &PoolKey, max_idle_age: Duration, now: Instant) -> Option { 264 | while let Some(i) = self.lru.iter().position(|c| c.key == *key) { 265 | let mut conn = self.lru.remove(i).unwrap(); // unwrap ok since we just got the position 266 | 267 | // Before we release the connection, we probe that it appears to still work. 268 | if !conn.is_open() { 269 | // This connection is broken. Try find another one. 270 | continue; 271 | } 272 | 273 | if conn.age(now) >= max_idle_age { 274 | // A max_duration that is shorter in the request than the pool. 275 | // This connection survives in the pool, but is not used for this 276 | // specific connection. 
277 | continue; 278 | } 279 | 280 | return Some(conn); 281 | } 282 | None 283 | } 284 | } 285 | 286 | impl fmt::Debug for ConnectionPool { 287 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 288 | f.debug_struct("ConnectionPool") 289 | .field("connector", &self.connector) 290 | .finish() 291 | } 292 | } 293 | 294 | impl fmt::Debug for Connection { 295 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 296 | f.debug_struct("Connection") 297 | .field("key", &self.key) 298 | .field("conn", &self.transport) 299 | .finish() 300 | } 301 | } 302 | 303 | impl fmt::Debug for PoolKey { 304 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 305 | f.debug_struct("PoolKey") 306 | .field("scheme", &self.0 .0) 307 | .field("authority", &DebugAuthority(&self.0 .1)) 308 | .field("proxy", &self.0 .2) 309 | .finish() 310 | } 311 | } 312 | 313 | impl<'a, 'b> From<&'a ConnectionDetails<'b>> for PoolKey { 314 | fn from(details: &'a ConnectionDetails) -> Self { 315 | PoolKey::new(details.uri, details.config.proxy()) 316 | } 317 | } 318 | 319 | #[cfg(all(test, feature = "_test"))] 320 | mod test { 321 | use super::*; 322 | 323 | #[test] 324 | fn poolkey_new() { 325 | // Test that PoolKey::new() does not panic on unrecognized schemes. 326 | PoolKey::new(&Uri::from_static("zzz://example.com"), None); 327 | } 328 | } 329 | -------------------------------------------------------------------------------- /src/query.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::fmt; 3 | use std::iter::Enumerate; 4 | use std::ops::Deref; 5 | use std::str::Chars; 6 | 7 | use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS}; 8 | 9 | /// AsciiSet for characters that need to be percent-encoded in URL query parameters. 
10 | /// 11 | /// This set follows URL specification from 12 | pub const ENCODED_IN_QUERY: &AsciiSet = &CONTROLS 13 | .add(b' ') 14 | .add(b'"') 15 | .add(b'#') 16 | .add(b'$') 17 | .add(b'%') 18 | .add(b'&') 19 | .add(b'\'') // Single quote should be encoded according to the URL specs 20 | .add(b'+') 21 | .add(b',') 22 | .add(b'/') 23 | .add(b':') 24 | .add(b';') 25 | .add(b'<') 26 | .add(b'=') 27 | .add(b'>') 28 | .add(b'?') 29 | .add(b'@') 30 | .add(b'[') 31 | .add(b'\\') 32 | .add(b']') 33 | .add(b'^') 34 | .add(b'`') 35 | .add(b'{') 36 | .add(b'|') 37 | .add(b'}'); 38 | 39 | #[derive(Clone)] 40 | pub(crate) struct QueryParam<'a> { 41 | source: Source<'a>, 42 | } 43 | 44 | #[derive(Clone)] 45 | enum Source<'a> { 46 | Borrowed(&'a str), 47 | Owned(String), 48 | } 49 | 50 | /// Percent-encode a string using the ENCODED_IN_QUERY set. 51 | pub fn url_enc(i: &str) -> Cow { 52 | utf8_percent_encode(i, ENCODED_IN_QUERY).into() 53 | } 54 | 55 | /// Percent-encode a string using the ENCODED_IN_QUERY set, but replace encoded `%20` with `+`. 56 | pub fn form_url_enc(i: &str) -> Cow { 57 | let mut iter = utf8_percent_encode(i, ENCODED_IN_QUERY).map(|part| match part { 58 | "%20" => "+", 59 | _ => part, 60 | }); 61 | 62 | // We try to avoid allocating if we can (returning a Cow). 63 | match iter.next() { 64 | None => "".into(), 65 | Some(first) => match iter.next() { 66 | // Case avoids allocation 67 | None => first.into(), 68 | // Following allocates 69 | Some(second) => { 70 | let mut string = first.to_owned(); 71 | string.push_str(second); 72 | string.extend(iter); 73 | string.into() 74 | } 75 | }, 76 | } 77 | } 78 | 79 | impl<'a> QueryParam<'a> { 80 | /// Create a new key-value pair with both the key and value percent-encoded. 
81 | pub fn new_key_value(param: &str, value: &str) -> QueryParam<'static> { 82 | let s = format!("{}={}", url_enc(param), url_enc(value)); 83 | QueryParam { 84 | source: Source::Owned(s), 85 | } 86 | } 87 | 88 | /// Create a new key-value pair without percent-encoding. 89 | /// 90 | /// This is used by query_raw() to add parameters that are already encoded 91 | /// or that should not be encoded. 92 | pub fn new_key_value_raw(param: &str, value: &str) -> QueryParam<'static> { 93 | let s = format!("{}={}", param, value); 94 | QueryParam { 95 | source: Source::Owned(s), 96 | } 97 | } 98 | 99 | fn as_str(&self) -> &str { 100 | match &self.source { 101 | Source::Borrowed(v) => v, 102 | Source::Owned(v) => v.as_str(), 103 | } 104 | } 105 | } 106 | 107 | pub(crate) fn parse_query_params(query_string: &str) -> impl Iterator> { 108 | assert!(query_string.is_ascii()); 109 | QueryParamIterator(query_string, query_string.chars().enumerate()) 110 | } 111 | 112 | struct QueryParamIterator<'a>(&'a str, Enumerate>); 113 | 114 | impl<'a> Iterator for QueryParamIterator<'a> { 115 | type Item = QueryParam<'a>; 116 | 117 | fn next(&mut self) -> Option { 118 | let mut first = None; 119 | let mut value = None; 120 | let mut separator = None; 121 | 122 | for (n, c) in self.1.by_ref() { 123 | if first.is_none() { 124 | first = Some(n); 125 | } 126 | if value.is_none() && c == '=' { 127 | value = Some(n + 1); 128 | } 129 | if c == '&' { 130 | separator = Some(n); 131 | break; 132 | } 133 | } 134 | 135 | if let Some(start) = first { 136 | let end = separator.unwrap_or(self.0.len()); 137 | let chunk = &self.0[start..end]; 138 | return Some(QueryParam { 139 | source: Source::Borrowed(chunk), 140 | }); 141 | } 142 | 143 | None 144 | } 145 | } 146 | 147 | impl<'a> fmt::Debug for QueryParam<'a> { 148 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 149 | f.debug_tuple("QueryParam").field(&self.as_str()).finish() 150 | } 151 | } 152 | 153 | impl<'a> fmt::Display for QueryParam<'a> { 
154 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 155 | match &self.source { 156 | Source::Borrowed(v) => write!(f, "{}", v), 157 | Source::Owned(v) => write!(f, "{}", v), 158 | } 159 | } 160 | } 161 | 162 | impl<'a> Deref for QueryParam<'a> { 163 | type Target = str; 164 | 165 | fn deref(&self) -> &Self::Target { 166 | self.as_str() 167 | } 168 | } 169 | 170 | impl<'a> PartialEq for QueryParam<'a> { 171 | fn eq(&self, other: &Self) -> bool { 172 | self.as_str() == other.as_str() 173 | } 174 | } 175 | 176 | #[cfg(test)] 177 | mod test { 178 | use super::*; 179 | 180 | use crate::http::Uri; 181 | 182 | #[test] 183 | fn query_string_does_not_start_with_question_mark() { 184 | let u: Uri = "https://foo.com/qwe?abc=qwe".parse().unwrap(); 185 | assert_eq!(u.query(), Some("abc=qwe")); 186 | } 187 | 188 | #[test] 189 | fn percent_encoding_is_not_decoded() { 190 | let u: Uri = "https://foo.com/qwe?abc=%20123".parse().unwrap(); 191 | assert_eq!(u.query(), Some("abc=%20123")); 192 | } 193 | 194 | #[test] 195 | fn fragments_are_not_a_thing() { 196 | let u: Uri = "https://foo.com/qwe?abc=qwe#yaz".parse().unwrap(); 197 | assert_eq!(u.to_string(), "https://foo.com/qwe?abc=qwe"); 198 | } 199 | 200 | fn p(s: &str) -> Vec { 201 | parse_query_params(s).map(|q| q.to_string()).collect() 202 | } 203 | 204 | #[test] 205 | fn parse_query_string() { 206 | assert_eq!(parse_query_params("").next(), None); 207 | assert_eq!(p("&"), vec![""]); 208 | assert_eq!(p("="), vec!["="]); 209 | assert_eq!(p("&="), vec!["", "="]); 210 | assert_eq!(p("foo=bar"), vec!["foo=bar"]); 211 | assert_eq!(p("foo=bar&"), vec!["foo=bar"]); 212 | assert_eq!(p("foo=bar&foo2=bar2"), vec!["foo=bar", "foo2=bar2"]); 213 | } 214 | 215 | #[test] 216 | fn do_not_url_encode_some_things() { 217 | const NOT_ENCODE: &str = "!()*-._~"; 218 | let q = QueryParam::new_key_value("key", NOT_ENCODE); 219 | assert_eq!(q.as_str(), format!("key={}", NOT_ENCODE)); 220 | } 221 | 222 | #[test] 223 | fn 
special_encoding_space_for_form() { 224 | let value = "value with spaces and 'quotes'"; 225 | let form = form_url_enc(value); 226 | assert_eq!(form.as_ref(), "value+with+spaces+and+%27quotes%27"); 227 | } 228 | 229 | #[test] 230 | fn do_encode_single_quote() { 231 | let value = "value'with'quotes"; 232 | let q = QueryParam::new_key_value("key", value); 233 | assert_eq!(q.as_str(), "key=value%27with%27quotes"); 234 | } 235 | 236 | #[test] 237 | fn raw_query_param_no_encoding() { 238 | // Use URI-valid characters for the raw param test 239 | let value = "value-without-spaces&special='chars'"; 240 | let q = QueryParam::new_key_value_raw("key", value); 241 | assert_eq!(q.as_str(), format!("key={}", value)); 242 | 243 | // Verify that symbols like &=+?/ remain unencoded in raw mode 244 | // but are encoded in normal mode 245 | let special_symbols = "symbols&=+?/'"; 246 | let q_raw = QueryParam::new_key_value_raw("raw", special_symbols); 247 | let q_encoded = QueryParam::new_key_value("encoded", special_symbols); 248 | 249 | // Raw should preserve all special chars, encoded should encode them 250 | assert_eq!(q_raw.as_str(), "raw=symbols&=+?/'"); 251 | assert_ne!(q_raw.as_str(), q_encoded.as_str()); 252 | } 253 | } 254 | -------------------------------------------------------------------------------- /src/request_ext.rs: -------------------------------------------------------------------------------- 1 | use crate::config::typestate::RequestExtScope; 2 | use crate::config::{Config, ConfigBuilder, RequestLevelConfig}; 3 | use crate::{http, Agent, AsSendBody, Body, Error}; 4 | use std::ops::Deref; 5 | use ureq_proto::http::{Request, Response}; 6 | 7 | /// Extension trait for [`http::Request`]. 8 | /// 9 | /// Adds additional convenience methods to the `Request` that are not available 10 | /// in the plain http API. 11 | pub trait RequestExt 12 | where 13 | S: AsSendBody, 14 | { 15 | /// Allows configuring the request behaviour, starting with the default [`Agent`]. 
16 | /// 17 | /// This method allows configuring the request by using the default Agent, and performing 18 | /// additional configurations on top. 19 | /// This method returns a `WithAgent` struct that it is possible to call `configure()` and `run()` 20 | /// on to configure the request behaviour, or run the request. 21 | /// 22 | /// # Example 23 | /// 24 | /// ``` 25 | /// use ureq::{http, RequestExt, Error}; 26 | /// 27 | /// let request: Result, Error> = http::Request::builder() 28 | /// .method(http::Method::GET) 29 | /// .uri("http://foo.bar") 30 | /// .body(()) 31 | /// .unwrap() 32 | /// .with_default_agent() 33 | /// .configure() 34 | /// .http_status_as_error(false) 35 | /// .run(); 36 | /// ``` 37 | fn with_default_agent(self) -> WithAgent<'static, S> 38 | where 39 | Self: Sized, 40 | { 41 | let agent = Agent::new_with_defaults(); 42 | Self::with_agent(self, agent) 43 | } 44 | 45 | /// Allows configuring this request behaviour, using a specific [`Agent`]. 46 | /// 47 | /// This method allows configuring the request by using a user-provided `Agent` and performing 48 | /// additional configurations on top. 49 | /// This method returns a `WithAgent` struct that it is possible to call `configure()` and `run()` 50 | /// on to configure the request behaviour, or run the request. 51 | /// 52 | /// # Example 53 | /// 54 | /// ``` 55 | /// use ureq::{http, Agent, RequestExt, Error}; 56 | /// use std::time::Duration; 57 | /// let agent = Agent::config_builder() 58 | /// .timeout_global(Some(Duration::from_secs(30))) 59 | /// .build() 60 | /// .new_agent(); 61 | /// 62 | /// let request: Result, Error> = http::Request::builder() 63 | /// .method(http::Method::GET) 64 | /// .uri("http://foo.bar") 65 | /// .body(()) 66 | /// .unwrap() 67 | /// .with_agent(&agent) 68 | /// .run(); 69 | /// ``` 70 | /// # Example with further customizations 71 | /// 72 | /// In this example we use a specific agent, but apply a request-specific configuration on top. 
73 | /// 74 | /// ``` 75 | /// use ureq::{http, Agent, RequestExt, Error}; 76 | /// use std::time::Duration; 77 | /// let mut agent = Agent::config_builder() 78 | /// .timeout_global(Some(Duration::from_secs(30))) 79 | /// .build() 80 | /// .new_agent(); 81 | /// 82 | /// let request: Result, Error> = http::Request::builder() 83 | /// .method(http::Method::GET) 84 | /// .uri("http://foo.bar") 85 | /// .body(()) 86 | /// .unwrap() 87 | /// .with_agent(&agent) 88 | /// .configure() 89 | /// .http_status_as_error(false) 90 | /// .run(); 91 | /// ``` 92 | fn with_agent<'a>(self, agent: impl Into>) -> WithAgent<'a, S>; 93 | } 94 | 95 | /// Wrapper struct that holds a [`Request`] associated with an [`Agent`]. 96 | pub struct WithAgent<'a, S: AsSendBody> { 97 | pub(crate) agent: AgentRef<'a>, 98 | pub(crate) request: Request, 99 | } 100 | 101 | impl<'a, S: AsSendBody> WithAgent<'a, S> { 102 | /// Returns a [`ConfigBuilder`] for configuring the request. 103 | /// 104 | /// This allows setting additional request-specific options before sending the request. 105 | pub fn configure(self) -> ConfigBuilder> { 106 | ConfigBuilder(RequestExtScope(self)) 107 | } 108 | 109 | /// Executes the request using the associated [`Agent`]. 
110 | pub fn run(self) -> Result, Error> { 111 | self.agent.run(self.request) 112 | } 113 | } 114 | 115 | impl<'a, S: AsSendBody> WithAgent<'a, S> { 116 | pub(crate) fn request_level_config(&mut self) -> &mut Config { 117 | let request_level_config = self 118 | .request 119 | .extensions_mut() 120 | .get_mut::(); 121 | 122 | if request_level_config.is_none() { 123 | self.request 124 | .extensions_mut() 125 | .insert(self.agent.new_request_level_config()); 126 | } 127 | 128 | // Unwrap is safe because of the above check 129 | let req_level: &mut RequestLevelConfig = self 130 | .request 131 | .extensions_mut() 132 | .get_mut::() 133 | .unwrap(); 134 | 135 | &mut req_level.0 136 | } 137 | } 138 | 139 | /// Reference type to hold an owned or borrowed [`Agent`]. 140 | pub enum AgentRef<'a> { 141 | Owned(Agent), 142 | Borrowed(&'a Agent), 143 | } 144 | 145 | impl RequestExt for http::Request { 146 | fn with_agent<'a>(self, agent: impl Into>) -> WithAgent<'a, S> { 147 | WithAgent { 148 | agent: agent.into(), 149 | request: self, 150 | } 151 | } 152 | } 153 | 154 | impl From for AgentRef<'static> { 155 | fn from(value: Agent) -> Self { 156 | AgentRef::Owned(value) 157 | } 158 | } 159 | 160 | impl<'a> From<&'a Agent> for AgentRef<'a> { 161 | fn from(value: &'a Agent) -> Self { 162 | AgentRef::Borrowed(value) 163 | } 164 | } 165 | 166 | impl Deref for AgentRef<'_> { 167 | type Target = Agent; 168 | 169 | fn deref(&self) -> &Self::Target { 170 | match self { 171 | AgentRef::Owned(agent) => agent, 172 | AgentRef::Borrowed(agent) => agent, 173 | } 174 | } 175 | } 176 | 177 | #[cfg(test)] 178 | mod tests { 179 | use super::*; 180 | use crate::config::RequestLevelConfig; 181 | use std::time::Duration; 182 | 183 | #[test] 184 | fn configure_request_with_default_agent() { 185 | // Create `http` crate request and configure with trait 186 | let request = http::Request::builder() 187 | .method(http::Method::GET) 188 | .uri("http://foo.bar") 189 | .body(()) 190 | .unwrap() 191 | 
.with_default_agent() 192 | .configure() 193 | .https_only(true) 194 | .build(); 195 | 196 | // Assert that the request-level configuration has been set 197 | let request_config = request 198 | .request 199 | .extensions() 200 | .get::() 201 | .cloned() 202 | .unwrap(); 203 | 204 | assert!(request_config.0.https_only()); 205 | } 206 | 207 | #[test] 208 | fn configure_request_default_agent_2() { 209 | // Create `http` crate request and configure with trait 210 | let request = http::Request::builder() 211 | .method(http::Method::GET) 212 | .uri("http://foo.bar") 213 | .body(()) 214 | .unwrap() 215 | .with_default_agent() 216 | .configure() 217 | .https_only(false) 218 | .build(); 219 | 220 | // Assert that the request-level configuration has been set 221 | let request_config = request 222 | .request 223 | .extensions() 224 | .get::() 225 | .cloned() 226 | .unwrap(); 227 | 228 | assert!(!request_config.0.https_only()); 229 | } 230 | 231 | #[test] 232 | fn configure_request_default_agent_3() { 233 | // Create `http` crate request 234 | let request = http::Request::builder() 235 | .method(http::Method::POST) 236 | .uri("http://foo.bar") 237 | .body("Some body") 238 | .unwrap(); 239 | 240 | // Configure with the trait 241 | let request = request 242 | .with_default_agent() 243 | .configure() 244 | .http_status_as_error(true) 245 | .build(); 246 | 247 | let request_config = request 248 | .request 249 | .extensions() 250 | .get::() 251 | .cloned() 252 | .unwrap(); 253 | 254 | assert!(request_config.0.http_status_as_error()); 255 | } 256 | 257 | #[test] 258 | fn configure_request_default_agent_4() { 259 | // Create `http` crate request 260 | let request = http::Request::builder() 261 | .method(http::Method::POST) 262 | .uri("http://foo.bar") 263 | .body("Some body") 264 | .unwrap(); 265 | 266 | // Configure with the trait 267 | let request = request 268 | .with_default_agent() 269 | .configure() 270 | .http_status_as_error(false) 271 | .build(); 272 | 273 | let 
request_config = request 274 | .request 275 | .extensions() 276 | .get::() 277 | .cloned() 278 | .unwrap(); 279 | 280 | assert!(!request_config.0.http_status_as_error()); 281 | } 282 | 283 | #[test] 284 | fn configure_request_specified_agent() { 285 | // Create `http` crate request 286 | let request = http::Request::builder() 287 | .method(http::Method::POST) 288 | .uri("http://foo.bar") 289 | .body("Some body") 290 | .unwrap(); 291 | 292 | // Configure with the trait 293 | let agent = Agent::config_builder() 294 | .timeout_per_call(Some(Duration::from_secs(60))) 295 | .build() 296 | .new_agent(); 297 | 298 | let request = request 299 | .with_agent(&agent) 300 | .configure() 301 | .http_status_as_error(false) 302 | .build(); 303 | 304 | let request_config = request 305 | .request 306 | .extensions() 307 | .get::() 308 | .cloned() 309 | .unwrap(); 310 | 311 | // The request-level config is the agent defaults + the explicitly configured stuff 312 | assert!(!request_config.0.http_status_as_error()); 313 | assert_eq!( 314 | request_config.0.timeouts().per_call, 315 | Some(Duration::from_secs(60)) 316 | ); 317 | } 318 | } 319 | -------------------------------------------------------------------------------- /src/response.rs: -------------------------------------------------------------------------------- 1 | use http::Uri; 2 | 3 | use crate::body::Body; 4 | use crate::http; 5 | 6 | #[derive(Debug, Clone)] 7 | pub(crate) struct ResponseUri(pub http::Uri); 8 | 9 | #[derive(Debug, Clone)] 10 | pub(crate) struct RedirectHistory(pub Vec); 11 | 12 | /// Extension trait for [`http::Response`]. 13 | /// 14 | /// Adds additional convenience methods to the `Response` that are not available 15 | /// in the plain http API. 16 | pub trait ResponseExt { 17 | /// The Uri that ultimately this Response is about. 18 | /// 19 | /// This can differ from the request uri when we have followed redirects. 
20 | /// 21 | /// ``` 22 | /// use ureq::ResponseExt; 23 | /// 24 | /// let res = ureq::get("https://httpbin.org/redirect-to?url=%2Fget") 25 | /// .call().unwrap(); 26 | /// 27 | /// assert_eq!(res.get_uri(), "https://httpbin.org/get"); 28 | /// ``` 29 | fn get_uri(&self) -> &Uri; 30 | 31 | /// The full history of uris, including the request and final uri. 32 | /// 33 | /// Returns `None` when [`Config::save_redirect_history`][crate::config::Config::save_redirect_history] 34 | /// is `false`. 35 | /// 36 | /// 37 | /// ``` 38 | /// # use ureq::http::Uri; 39 | /// use ureq::ResponseExt; 40 | /// 41 | /// let uri1: Uri = "https://httpbin.org/redirect-to?url=%2Fget".parse().unwrap(); 42 | /// let uri2: Uri = "https://httpbin.org/get".parse::().unwrap(); 43 | /// 44 | /// let res = ureq::get(&uri1) 45 | /// .config() 46 | /// .save_redirect_history(true) 47 | /// .build() 48 | /// .call().unwrap(); 49 | /// 50 | /// let history = res.get_redirect_history().unwrap(); 51 | /// 52 | /// assert_eq!(history, &[uri1, uri2]); 53 | /// ``` 54 | fn get_redirect_history(&self) -> Option<&[Uri]>; 55 | } 56 | 57 | impl ResponseExt for http::Response { 58 | fn get_uri(&self) -> &Uri { 59 | &self 60 | .extensions() 61 | .get::() 62 | .expect("uri to have been set") 63 | .0 64 | } 65 | 66 | fn get_redirect_history(&self) -> Option<&[Uri]> { 67 | self.extensions() 68 | .get::() 69 | .map(|r| r.0.as_ref()) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/send_body.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io::{self, Read, Stdin}; 3 | use std::net::TcpStream; 4 | 5 | use crate::body::{Body, BodyReader}; 6 | use crate::http; 7 | use crate::util::private::Private; 8 | 9 | /// Request body for sending data via POST, PUT and PATCH. 
10 | /// 11 | /// Typically not interacted with directly since the trait [`AsSendBody`] is implemented 12 | /// for the majority of the types of data a user might want to send to a remote server. 13 | /// That means if you want to send things like `String`, `&str` or `[u8]`, they can be 14 | /// used directly. See documentation for [`AsSendBody`]. 15 | /// 16 | /// The exception is when using [`Read`] trait bodies, in which case we wrap the request 17 | /// body directly. See below [`SendBody::from_reader`]. 18 | /// 19 | pub struct SendBody<'a> { 20 | inner: BodyInner<'a>, 21 | ended: bool, 22 | } 23 | 24 | impl<'a> SendBody<'a> { 25 | /// Creates an empty body. 26 | pub fn none() -> SendBody<'static> { 27 | BodyInner::None.into() 28 | } 29 | 30 | /// Creates a body from a shared [`Read`] impl. 31 | pub fn from_reader(reader: &'a mut dyn Read) -> SendBody<'a> { 32 | BodyInner::Reader(reader).into() 33 | } 34 | 35 | /// Creates a body from an owned [`Read]` impl. 36 | pub fn from_owned_reader(reader: impl Read + 'static) -> SendBody<'static> { 37 | BodyInner::OwnedReader(Box::new(reader)).into() 38 | } 39 | 40 | /// Creates a body to send as JSON from any [`Serialize`](serde::ser::Serialize) value. 
41 | #[cfg(feature = "json")] 42 | pub fn from_json( 43 | value: &impl serde::ser::Serialize, 44 | ) -> Result, crate::Error> { 45 | let json = serde_json::to_vec_pretty(value)?; 46 | Ok(BodyInner::ByteVec(io::Cursor::new(json)).into()) 47 | } 48 | 49 | pub(crate) fn read(&mut self, buf: &mut [u8]) -> io::Result { 50 | let n = match &mut self.inner { 51 | BodyInner::None => { 52 | return Ok(0); 53 | } 54 | BodyInner::ByteSlice(v) => { 55 | let max = v.len().min(buf.len()); 56 | 57 | buf[..max].copy_from_slice(&v[..max]); 58 | *v = &v[max..]; 59 | 60 | Ok(max) 61 | } 62 | #[cfg(feature = "json")] 63 | BodyInner::ByteVec(v) => v.read(buf), 64 | BodyInner::Reader(v) => v.read(buf), 65 | BodyInner::OwnedReader(v) => v.read(buf), 66 | BodyInner::Body(v) => v.read(buf), 67 | }?; 68 | 69 | if n == 0 { 70 | self.ended = true; 71 | } 72 | 73 | Ok(n) 74 | } 75 | 76 | pub(crate) fn body_mode(&self) -> BodyMode { 77 | self.inner.body_mode() 78 | } 79 | 80 | /// Turn this `SendBody` into a reader. 81 | /// 82 | /// This is useful in [`Middleware`][crate::middleware::Middleware] to make changes to the 83 | /// body before sending it. 84 | /// 85 | /// ``` 86 | /// use ureq::{SendBody, Body}; 87 | /// use ureq::middleware::MiddlewareNext; 88 | /// use ureq::http::{Request, Response, header::HeaderValue}; 89 | /// use std::io::Read; 90 | /// 91 | /// fn my_middleware(req: Request, next: MiddlewareNext) 92 | /// -> Result, ureq::Error> { 93 | /// 94 | /// // Take apart the request. 95 | /// let (parts, body) = req.into_parts(); 96 | /// 97 | /// // Take the first 100 bytes of the incoming send body. 98 | /// let mut reader = body.into_reader().take(100); 99 | /// 100 | /// // Create a new SendBody. 101 | /// let new_body = SendBody::from_reader(&mut reader); 102 | /// 103 | /// // Reconstitute the request. 
104 | /// let req = Request::from_parts(parts, new_body); 105 | /// 106 | /// // set my bespoke header and continue the chain 107 | /// next.handle(req) 108 | /// } 109 | /// ``` 110 | pub fn into_reader(self) -> impl Sized + io::Read + 'a { 111 | ReadAdapter(self) 112 | } 113 | } 114 | 115 | struct ReadAdapter<'a>(SendBody<'a>); 116 | 117 | impl<'a> io::Read for ReadAdapter<'a> { 118 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 119 | self.0.read(buf) 120 | } 121 | } 122 | 123 | use http::Response; 124 | use ureq_proto::BodyMode; 125 | 126 | /// Trait for common types to send in POST, PUT or PATCH. 127 | /// 128 | /// Sending common data types such as `String`, `&str` or `&[u8]` require no further wrapping 129 | /// and can be sent either by [`RequestBuilder::send()`][crate::RequestBuilder::send] or using the 130 | /// `http` crate [`Request`][http::Request] directly (see example below). 131 | /// 132 | /// Implemented for: 133 | /// 134 | /// * `&str` 135 | /// * `&String` 136 | /// * `&Vec` 137 | /// * `&File` 138 | /// * `&TcpStream` 139 | /// * `&[u8]` 140 | /// * `Response` 141 | /// * `String` 142 | /// * `Vec` 143 | /// * `File` 144 | /// * `Stdin` 145 | /// * `TcpStream` 146 | /// * `UnixStream` (not on windows) 147 | /// * `&[u8; N]` 148 | /// * `()` 149 | /// 150 | /// # Example 151 | /// 152 | /// These two examples are equivalent. 
153 | /// 154 | /// ``` 155 | /// let data: &[u8] = b"My special request body data"; 156 | /// 157 | /// let response = ureq::post("https://httpbin.org/post") 158 | /// .send(data)?; 159 | /// # Ok::<_, ureq::Error>(()) 160 | /// ``` 161 | /// 162 | /// Using `http` crate API 163 | /// 164 | /// ``` 165 | /// use ureq::http; 166 | /// 167 | /// let data: &[u8] = b"My special request body data"; 168 | /// 169 | /// let request = http::Request::post("https://httpbin.org/post") 170 | /// .body(data)?; 171 | /// 172 | /// let response = ureq::run(request)?; 173 | /// # Ok::<_, ureq::Error>(()) 174 | /// ``` 175 | pub trait AsSendBody: Private { 176 | #[doc(hidden)] 177 | fn as_body(&mut self) -> SendBody; 178 | } 179 | 180 | impl<'a> Private for SendBody<'a> {} 181 | impl<'a> AsSendBody for SendBody<'a> { 182 | fn as_body(&mut self) -> SendBody { 183 | SendBody { 184 | inner: match &mut self.inner { 185 | BodyInner::None => BodyInner::None, 186 | BodyInner::ByteSlice(v) => BodyInner::ByteSlice(v), 187 | #[cfg(feature = "json")] 188 | BodyInner::ByteVec(v) => BodyInner::ByteSlice(v.get_ref()), 189 | BodyInner::Reader(v) => BodyInner::Reader(v), 190 | BodyInner::Body(v) => BodyInner::Reader(v), 191 | BodyInner::OwnedReader(v) => BodyInner::Reader(v), 192 | }, 193 | ended: self.ended, 194 | } 195 | } 196 | } 197 | 198 | pub(crate) enum BodyInner<'a> { 199 | None, 200 | ByteSlice(&'a [u8]), 201 | #[cfg(feature = "json")] 202 | ByteVec(io::Cursor>), 203 | Body(Box>), 204 | Reader(&'a mut dyn Read), 205 | OwnedReader(Box), 206 | } 207 | 208 | impl<'a> BodyInner<'a> { 209 | pub fn body_mode(&self) -> BodyMode { 210 | match self { 211 | BodyInner::None => BodyMode::NoBody, 212 | BodyInner::ByteSlice(v) => BodyMode::LengthDelimited(v.len() as u64), 213 | #[cfg(feature = "json")] 214 | BodyInner::ByteVec(v) => BodyMode::LengthDelimited(v.get_ref().len() as u64), 215 | BodyInner::Body(v) => v.body_mode(), 216 | BodyInner::Reader(_) => BodyMode::Chunked, 217 | 
BodyInner::OwnedReader(_) => BodyMode::Chunked, 218 | } 219 | } 220 | } 221 | 222 | impl Private for &[u8] {} 223 | impl AsSendBody for &[u8] { 224 | fn as_body(&mut self) -> SendBody { 225 | BodyInner::ByteSlice(self).into() 226 | } 227 | } 228 | 229 | impl Private for &str {} 230 | impl AsSendBody for &str { 231 | fn as_body(&mut self) -> SendBody { 232 | BodyInner::ByteSlice((*self).as_ref()).into() 233 | } 234 | } 235 | 236 | impl Private for String {} 237 | impl AsSendBody for String { 238 | fn as_body(&mut self) -> SendBody { 239 | BodyInner::ByteSlice((*self).as_ref()).into() 240 | } 241 | } 242 | 243 | impl Private for Vec {} 244 | impl AsSendBody for Vec { 245 | fn as_body(&mut self) -> SendBody { 246 | BodyInner::ByteSlice((*self).as_ref()).into() 247 | } 248 | } 249 | 250 | impl Private for &String {} 251 | impl AsSendBody for &String { 252 | fn as_body(&mut self) -> SendBody { 253 | BodyInner::ByteSlice((*self).as_ref()).into() 254 | } 255 | } 256 | 257 | impl Private for &Vec {} 258 | impl AsSendBody for &Vec { 259 | fn as_body(&mut self) -> SendBody { 260 | BodyInner::ByteSlice((*self).as_ref()).into() 261 | } 262 | } 263 | 264 | impl Private for &File {} 265 | impl AsSendBody for &File { 266 | fn as_body(&mut self) -> SendBody { 267 | BodyInner::Reader(self).into() 268 | } 269 | } 270 | 271 | impl Private for &TcpStream {} 272 | impl AsSendBody for &TcpStream { 273 | fn as_body(&mut self) -> SendBody { 274 | BodyInner::Reader(self).into() 275 | } 276 | } 277 | 278 | impl Private for File {} 279 | impl AsSendBody for File { 280 | fn as_body(&mut self) -> SendBody { 281 | BodyInner::Reader(self).into() 282 | } 283 | } 284 | 285 | impl Private for TcpStream {} 286 | impl AsSendBody for TcpStream { 287 | fn as_body(&mut self) -> SendBody { 288 | BodyInner::Reader(self).into() 289 | } 290 | } 291 | 292 | impl Private for Stdin {} 293 | impl AsSendBody for Stdin { 294 | fn as_body(&mut self) -> SendBody { 295 | BodyInner::Reader(self).into() 296 | } 297 | 
} 298 | 299 | // MSRV 1.78 300 | // impl_into_body!(&Stdin, Reader); 301 | 302 | #[cfg(target_family = "unix")] 303 | use std::os::unix::net::UnixStream; 304 | 305 | #[cfg(target_family = "unix")] 306 | impl Private for UnixStream {} 307 | #[cfg(target_family = "unix")] 308 | impl AsSendBody for UnixStream { 309 | fn as_body(&mut self) -> SendBody { 310 | BodyInner::Reader(self).into() 311 | } 312 | } 313 | 314 | impl<'a> From> for SendBody<'a> { 315 | fn from(inner: BodyInner<'a>) -> Self { 316 | SendBody { 317 | inner, 318 | ended: false, 319 | } 320 | } 321 | } 322 | 323 | impl Private for Body {} 324 | impl AsSendBody for Body { 325 | fn as_body(&mut self) -> SendBody { 326 | BodyInner::Body(Box::new(self.as_reader())).into() 327 | } 328 | } 329 | 330 | impl Private for Response {} 331 | impl AsSendBody for Response { 332 | fn as_body(&mut self) -> SendBody { 333 | BodyInner::Body(Box::new(self.body_mut().as_reader())).into() 334 | } 335 | } 336 | 337 | impl Private for &[u8; N] {} 338 | impl AsSendBody for &[u8; N] { 339 | fn as_body(&mut self) -> SendBody { 340 | BodyInner::ByteSlice(self.as_slice()).into() 341 | } 342 | } 343 | 344 | impl Private for () {} 345 | impl AsSendBody for () { 346 | fn as_body(&mut self) -> SendBody { 347 | BodyInner::None.into() 348 | } 349 | } 350 | -------------------------------------------------------------------------------- /src/timings.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::iter::once; 3 | use std::sync::Arc; 4 | 5 | use crate::config::Timeouts; 6 | use crate::transport::time::{Duration, Instant}; 7 | 8 | /// The various timeouts. 9 | /// 10 | /// Each enum corresponds to a value in 11 | /// [`ConfigBuilder::timeout_xxx`][crate::config::ConfigBuilder::timeout_global]. 12 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 13 | #[non_exhaustive] 14 | pub enum Timeout { 15 | /// Timeout for entire operation. 
16 | Global, 17 | 18 | /// Timeout for the current call (when redirected). 19 | PerCall, 20 | 21 | /// Timeout in the resolver. 22 | Resolve, 23 | 24 | /// Timeout while opening the connection. 25 | Connect, 26 | 27 | /// Timeout while sending the request headers. 28 | SendRequest, 29 | 30 | /// Internal value never seen outside ureq (since awaiting 100 is expected 31 | /// to timeout). 32 | #[doc(hidden)] 33 | Await100, 34 | 35 | /// Timeout when sending then request body. 36 | SendBody, 37 | 38 | /// Timeout while receiving the response headers. 39 | RecvResponse, 40 | 41 | /// Timeout while receiving the response body. 42 | RecvBody, 43 | } 44 | 45 | impl Timeout { 46 | /// Give the immediate preceeding Timeout 47 | fn preceeding(&self) -> impl Iterator { 48 | let prev: &[Timeout] = match self { 49 | Timeout::Resolve => &[Timeout::PerCall], 50 | Timeout::Connect => &[Timeout::Resolve], 51 | Timeout::SendRequest => &[Timeout::Connect], 52 | Timeout::Await100 => &[Timeout::SendRequest], 53 | Timeout::SendBody => &[Timeout::SendRequest, Timeout::Await100], 54 | Timeout::RecvResponse => &[Timeout::SendRequest, Timeout::SendBody], 55 | Timeout::RecvBody => &[Timeout::RecvResponse], 56 | _ => &[], 57 | }; 58 | 59 | prev.iter().copied() 60 | } 61 | 62 | /// All timeouts to check 63 | fn timeouts_to_check(&self) -> impl Iterator { 64 | // Always check Global and PerCall 65 | once(*self) 66 | .chain(self.preceeding()) 67 | .chain([Timeout::Global, Timeout::PerCall]) 68 | } 69 | 70 | /// Get the corresponding configured timeout 71 | fn configured_timeout(&self, timeouts: &Timeouts) -> Option { 72 | match self { 73 | Timeout::Global => timeouts.global, 74 | Timeout::PerCall => timeouts.per_call, 75 | Timeout::Resolve => timeouts.resolve, 76 | Timeout::Connect => timeouts.connect, 77 | Timeout::SendRequest => timeouts.send_request, 78 | Timeout::Await100 => timeouts.await_100, 79 | Timeout::SendBody => timeouts.send_body, 80 | Timeout::RecvResponse => 
timeouts.recv_response, 81 | Timeout::RecvBody => timeouts.recv_body, 82 | } 83 | .map(Into::into) 84 | } 85 | } 86 | 87 | #[derive(Default, Debug)] 88 | pub(crate) struct CallTimings { 89 | timeouts: Box, 90 | current_time: CurrentTime, 91 | times: Vec<(Timeout, Instant)>, 92 | } 93 | 94 | impl CallTimings { 95 | pub(crate) fn new(timeouts: Timeouts, current_time: CurrentTime) -> Self { 96 | let mut times = Vec::with_capacity(8); 97 | 98 | let now = current_time.now(); 99 | times.push((Timeout::Global, now)); 100 | times.push((Timeout::PerCall, now)); 101 | 102 | CallTimings { 103 | timeouts: Box::new(timeouts), 104 | current_time, 105 | times, 106 | } 107 | } 108 | 109 | pub(crate) fn new_call(mut self) -> CallTimings { 110 | self.times.truncate(1); // Global is in position 0. 111 | self.times.push((Timeout::PerCall, self.current_time.now())); 112 | 113 | CallTimings { 114 | timeouts: self.timeouts, 115 | current_time: self.current_time, 116 | times: self.times, 117 | } 118 | } 119 | 120 | pub(crate) fn now(&self) -> Instant { 121 | self.current_time.now() 122 | } 123 | 124 | pub(crate) fn record_time(&mut self, timeout: Timeout) { 125 | // Each time should only be recorded once 126 | assert!( 127 | self.time_of(timeout).is_none(), 128 | "{:?} recorded more than once", 129 | timeout 130 | ); 131 | 132 | // There need to be at least one preceeding time recorded 133 | // since it follows a graph/call tree. 
134 | let any_preceeding = timeout 135 | .preceeding() 136 | .filter_map(|to_check| self.time_of(to_check)) 137 | .any(|_| true); 138 | 139 | assert!(any_preceeding, "{:?} has no preceeding", timeout); 140 | 141 | // Record the time 142 | self.times.push((timeout, self.current_time.now())); 143 | } 144 | 145 | fn time_of(&self, timeout: Timeout) -> Option { 146 | self.times.iter().find(|x| x.0 == timeout).map(|x| x.1) 147 | } 148 | 149 | pub(crate) fn next_timeout(&self, timeout: Timeout) -> NextTimeout { 150 | let now = self.now(); 151 | 152 | let (reason, at) = timeout 153 | .timeouts_to_check() 154 | .filter_map(|to_check| { 155 | let time = if to_check == timeout { 156 | now 157 | } else { 158 | self.time_of(to_check)? 159 | }; 160 | let timeout = to_check.configured_timeout(&self.timeouts)?; 161 | Some((to_check, time + timeout)) 162 | }) 163 | .min_by(|a, b| a.1.cmp(&b.1)) 164 | .unwrap_or((Timeout::Global, Instant::NotHappening)); 165 | 166 | let after = at.duration_since(now); 167 | 168 | NextTimeout { after, reason } 169 | } 170 | } 171 | 172 | #[derive(Clone)] 173 | pub(crate) struct CurrentTime(Arc Instant + Send + Sync + 'static>); 174 | 175 | impl CurrentTime { 176 | pub(crate) fn now(&self) -> Instant { 177 | self.0() 178 | } 179 | } 180 | 181 | /// A pair of [`Duration`] and [`Timeout`]. 182 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 183 | pub struct NextTimeout { 184 | /// Duration until next timeout. 
185 | pub after: Duration, 186 | /// The name of the next timeout.s 187 | pub reason: Timeout, 188 | } 189 | 190 | impl NextTimeout { 191 | /// Returns the duration of the timeout if the timeout must happen, but avoid instant timeouts 192 | /// 193 | /// If the timeout must happen but is zero, returns 1 second 194 | pub fn not_zero(&self) -> Option { 195 | if self.after.is_not_happening() { 196 | None 197 | } else if self.after.is_zero() { 198 | Some(Duration::from_secs(1)) 199 | } else { 200 | Some(self.after) 201 | } 202 | } 203 | } 204 | 205 | impl fmt::Debug for CurrentTime { 206 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 207 | f.debug_tuple("CurrentTime").finish() 208 | } 209 | } 210 | 211 | impl Default for CurrentTime { 212 | fn default() -> Self { 213 | Self(Arc::new(Instant::now)) 214 | } 215 | } 216 | 217 | impl fmt::Display for Timeout { 218 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 219 | let r = match self { 220 | Timeout::Global => "global", 221 | Timeout::PerCall => "per call", 222 | Timeout::Resolve => "resolve", 223 | Timeout::Connect => "connect", 224 | Timeout::SendRequest => "send request", 225 | Timeout::SendBody => "send body", 226 | Timeout::Await100 => "await 100", 227 | Timeout::RecvResponse => "receive response", 228 | Timeout::RecvBody => "receive body", 229 | }; 230 | write!(f, "{}", r) 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /src/tls/cert.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::hash::{Hash, Hasher}; 3 | 4 | use crate::Error; 5 | 6 | /// An X509 certificate for a server or a client. 7 | /// 8 | /// These are either used as trust roots, or client authentication. 9 | /// 10 | /// The internal representation is DER form. The provided helpers for PEM 11 | /// translates to DER. 
12 | #[derive(Clone, Hash)] 13 | pub struct Certificate<'a> { 14 | der: CertDer<'a>, 15 | } 16 | 17 | #[derive(Clone)] 18 | enum CertDer<'a> { 19 | Borrowed(&'a [u8]), 20 | Owned(Vec), 21 | Rustls(rustls_pki_types::CertificateDer<'static>), 22 | } 23 | 24 | impl Hash for CertDer<'_> { 25 | fn hash(&self, state: &mut H) { 26 | core::mem::discriminant(self).hash(state); 27 | self.as_ref().hash(state) 28 | } 29 | } 30 | 31 | impl<'a> AsRef<[u8]> for CertDer<'a> { 32 | fn as_ref(&self) -> &[u8] { 33 | match self { 34 | CertDer::Borrowed(v) => v, 35 | CertDer::Owned(v) => v, 36 | CertDer::Rustls(v) => v, 37 | } 38 | } 39 | } 40 | 41 | impl<'a> Certificate<'a> { 42 | /// Read an X509 certificate in DER form. 43 | /// 44 | /// Does not immediately validate whether the data provided is a valid DER formatted 45 | /// X509. That validation is the responsibility of the TLS provider. 46 | pub fn from_der(der: &'a [u8]) -> Self { 47 | let der = CertDer::Borrowed(der); 48 | Certificate { der } 49 | } 50 | 51 | /// Read an X509 certificate in PEM form. 52 | /// 53 | /// This is a shorthand for [`parse_pem`] followed by picking the first certificate. 54 | /// Fails with an error if there is no certificate found in the PEM given. 55 | /// 56 | /// Translates to DER format internally. 57 | pub fn from_pem(pem: &'a [u8]) -> Result, Error> { 58 | let item = parse_pem(pem) 59 | .find(|p| matches!(p, Err(_) | Ok(PemItem::Certificate(_)))) 60 | // None means there were no matches in the PEM chain 61 | .ok_or(Error::Tls("No pem encoded cert found"))??; 62 | 63 | let PemItem::Certificate(cert) = item else { 64 | unreachable!("matches! above for Certificate"); 65 | }; 66 | 67 | Ok(cert) 68 | } 69 | 70 | /// This certificate in DER (the internal) format. 71 | pub fn der(&self) -> &[u8] { 72 | self.der.as_ref() 73 | } 74 | 75 | /// Clones (allocates) to produce a static copy. 
76 | pub fn to_owned(&self) -> Certificate<'static> { 77 | Certificate { 78 | der: CertDer::Owned(self.der.as_ref().to_vec()), 79 | } 80 | } 81 | } 82 | 83 | /// A private key used in client certificate auth. 84 | /// 85 | /// The internal representation is DER form. The provided helpers for PEM 86 | /// translates to DER. 87 | /// 88 | /// Deliberately not `Clone` to avoid accidental copies in memory. 89 | #[derive(Hash)] 90 | pub struct PrivateKey<'a> { 91 | kind: KeyKind, 92 | der: PrivateKeyDer<'a>, 93 | } 94 | 95 | enum PrivateKeyDer<'a> { 96 | Borrowed(&'a [u8]), 97 | Owned(Vec), 98 | Rustls(rustls_pki_types::PrivateKeyDer<'static>), 99 | } 100 | 101 | impl Hash for PrivateKeyDer<'_> { 102 | fn hash(&self, state: &mut H) { 103 | core::mem::discriminant(self).hash(state); 104 | match self { 105 | PrivateKeyDer::Borrowed(v) => v.hash(state), 106 | PrivateKeyDer::Owned(v) => v.hash(state), 107 | PrivateKeyDer::Rustls(v) => v.secret_der().as_ref().hash(state), 108 | } 109 | } 110 | } 111 | 112 | impl<'a> AsRef<[u8]> for PrivateKey<'a> { 113 | fn as_ref(&self) -> &[u8] { 114 | match &self.der { 115 | PrivateKeyDer::Borrowed(v) => v, 116 | PrivateKeyDer::Owned(v) => v, 117 | PrivateKeyDer::Rustls(v) => v.secret_der(), 118 | } 119 | } 120 | } 121 | 122 | /// The kind of private key. 123 | /// 124 | /// * For **rustls** any kind is valid. 125 | /// * For **native-tls** the only valid option is [`Pkcs8`](KeyKind::Pkcs8). 126 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 127 | #[non_exhaustive] 128 | pub enum KeyKind { 129 | /// An RSA private key 130 | Pkcs1, 131 | /// A PKCS#8 private key. 132 | /// 133 | /// Not encrypted with a passphrase. 134 | Pkcs8, 135 | /// A Sec1 private key 136 | Sec1, 137 | } 138 | 139 | impl<'a> PrivateKey<'a> { 140 | /// Read private key in DER form. 141 | /// 142 | /// Does not immediately validate whether the data provided is a valid DER. 143 | /// That validation is the responsibility of the TLS provider. 
144 | pub fn from_der(kind: KeyKind, der: &'a [u8]) -> Self { 145 | let der = PrivateKeyDer::Borrowed(der); 146 | PrivateKey { kind, der } 147 | } 148 | 149 | /// Read a private key in PEM form. 150 | /// 151 | /// This is a shorthand for [`parse_pem`] followed by picking the first found key. 152 | /// Fails with an error if there are no keys found in the PEM given. 153 | /// 154 | /// Translates to DER format internally. 155 | pub fn from_pem(pem: &'a [u8]) -> Result, Error> { 156 | let item = parse_pem(pem) 157 | .find(|p| matches!(p, Err(_) | Ok(PemItem::PrivateKey(_)))) 158 | // None means there were no matches in the PEM chain 159 | .ok_or(Error::Tls("No pem encoded private key found"))??; 160 | 161 | let PemItem::PrivateKey(key) = item else { 162 | unreachable!("matches! above for PrivateKey"); 163 | }; 164 | 165 | Ok(key) 166 | } 167 | 168 | /// The key kind 169 | pub fn kind(&self) -> KeyKind { 170 | self.kind 171 | } 172 | 173 | /// This private key in DER (the internal) format. 174 | pub fn der(&self) -> &[u8] { 175 | self.as_ref() 176 | } 177 | 178 | /// Clones (allocates) to produce a static copy. 179 | pub fn to_owned(&self) -> PrivateKey<'static> { 180 | PrivateKey { 181 | kind: self.kind, 182 | der: match &self.der { 183 | PrivateKeyDer::Borrowed(v) => PrivateKeyDer::Owned(v.to_vec()), 184 | PrivateKeyDer::Owned(v) => PrivateKeyDer::Owned(v.to_vec()), 185 | PrivateKeyDer::Rustls(v) => PrivateKeyDer::Rustls(v.clone_key()), 186 | }, 187 | } 188 | } 189 | } 190 | 191 | /// Parser of PEM data. 192 | /// 193 | /// The data may contain one or many PEM items. The iterator produces the recognized PEM 194 | /// items and skip others. 
195 | pub fn parse_pem(pem: &[u8]) -> impl Iterator, Error>> + '_ { 196 | PemIter(pem) 197 | } 198 | 199 | /// Kinds of PEM data found by [`parse_pem`] 200 | #[non_exhaustive] 201 | pub enum PemItem<'a> { 202 | /// An X509 certificate 203 | Certificate(Certificate<'a>), 204 | 205 | /// A private key 206 | PrivateKey(PrivateKey<'a>), 207 | } 208 | 209 | struct PemIter<'a>(&'a [u8]); 210 | 211 | impl<'a> Iterator for PemIter<'a> { 212 | type Item = Result, Error>; 213 | 214 | fn next(&mut self) -> Option { 215 | loop { 216 | match rustls_pemfile::read_one_from_slice(self.0) { 217 | Ok(Some((cert, rest))) => { 218 | // Move slice along for next iterator next() 219 | self.0 = rest; 220 | 221 | match cert { 222 | rustls_pemfile::Item::X509Certificate(der) => { 223 | return Some(Ok(Certificate { 224 | der: CertDer::Rustls(der), 225 | } 226 | .into())); 227 | } 228 | rustls_pemfile::Item::Pkcs1Key(der) => { 229 | return Some(Ok(PrivateKey { 230 | kind: KeyKind::Pkcs1, 231 | der: PrivateKeyDer::Rustls(der.into()), 232 | } 233 | .into())); 234 | } 235 | rustls_pemfile::Item::Pkcs8Key(der) => { 236 | return Some(Ok(PrivateKey { 237 | kind: KeyKind::Pkcs8, 238 | der: PrivateKeyDer::Rustls(der.into()), 239 | } 240 | .into())); 241 | } 242 | rustls_pemfile::Item::Sec1Key(der) => { 243 | return Some(Ok(PrivateKey { 244 | kind: KeyKind::Sec1, 245 | der: PrivateKeyDer::Rustls(der.into()), 246 | } 247 | .into())); 248 | } 249 | 250 | // Skip unhandled item type (CSR etc) 251 | _ => continue, 252 | } 253 | } 254 | 255 | // It's over 256 | Ok(None) => return None, 257 | 258 | Err(e) => { 259 | return Some(Err(Error::Pem(e))); 260 | } 261 | } 262 | } 263 | } 264 | } 265 | 266 | impl<'a> From> for PemItem<'a> { 267 | fn from(value: Certificate<'a>) -> Self { 268 | PemItem::Certificate(value) 269 | } 270 | } 271 | 272 | impl<'a> From> for PemItem<'a> { 273 | fn from(value: PrivateKey<'a>) -> Self { 274 | PemItem::PrivateKey(value) 275 | } 276 | } 277 | 278 | impl<'a> fmt::Debug for 
Certificate<'a> { 279 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 280 | f.debug_struct("Certificate").finish() 281 | } 282 | } 283 | 284 | impl<'a> fmt::Debug for PrivateKey<'a> { 285 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 286 | f.debug_struct("PrivateKey") 287 | .field("kind", &self.kind) 288 | .finish() 289 | } 290 | } 291 | -------------------------------------------------------------------------------- /src/tls/native_tls.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::fmt; 3 | use std::io::{Read, Write}; 4 | use std::sync::{Arc, OnceLock}; 5 | 6 | use crate::tls::{RootCerts, TlsProvider}; 7 | use crate::{transport::*, Error}; 8 | use der::pem::LineEnding; 9 | use der::Document; 10 | use native_tls::{Certificate, HandshakeError, Identity, TlsConnector}; 11 | use native_tls::{TlsConnectorBuilder, TlsStream}; 12 | 13 | use super::TlsConfig; 14 | 15 | /// Wrapper for TLS using native-tls. 16 | /// 17 | /// Requires feature flag **native-tls**. 18 | #[derive(Default)] 19 | pub struct NativeTlsConnector { 20 | connector: OnceLock, 21 | } 22 | 23 | struct CachedNativeTlsConnector { 24 | config_hash: u64, 25 | native_tls_connector: Arc, 26 | } 27 | 28 | impl Connector for NativeTlsConnector { 29 | type Out = Either; 30 | 31 | fn connect( 32 | &self, 33 | details: &ConnectionDetails, 34 | chained: Option, 35 | ) -> Result, Error> { 36 | let Some(transport) = chained else { 37 | panic!("NativeTlsConnector requires a chained transport"); 38 | }; 39 | 40 | // Only add TLS if we are connecting via HTTPS and the transport isn't TLS 41 | // already, otherwise use chained transport as is. 
42 | if !details.needs_tls() || transport.is_tls() { 43 | trace!("Skip"); 44 | return Ok(Some(Either::A(transport))); 45 | } 46 | 47 | if details.config.tls_config().provider != TlsProvider::NativeTls { 48 | debug!("Skip because config is not set to Native TLS"); 49 | return Ok(Some(Either::A(transport))); 50 | } 51 | 52 | trace!("Try wrap TLS"); 53 | 54 | let connector = self.get_cached_native_tls_connector(details)?; 55 | 56 | let domain = details 57 | .uri 58 | .authority() 59 | .expect("uri authority for tls") 60 | .host() 61 | .to_string(); 62 | 63 | let adapter = TransportAdapter::new(transport.boxed()); 64 | let stream = LazyStream::Unstarted(Some((connector, domain, adapter))); 65 | 66 | let buffers = LazyBuffers::new( 67 | details.config.input_buffer_size(), 68 | details.config.output_buffer_size(), 69 | ); 70 | 71 | let transport = NativeTlsTransport { buffers, stream }; 72 | 73 | debug!("Wrapped TLS"); 74 | 75 | Ok(Some(Either::B(transport))) 76 | } 77 | } 78 | 79 | impl NativeTlsConnector { 80 | fn get_cached_native_tls_connector( 81 | &self, 82 | details: &ConnectionDetails, 83 | ) -> Result, Error> { 84 | let tls_config = details.config.tls_config(); 85 | 86 | let connector = if details.request_level { 87 | // If the TlsConfig is request level, it is not allowed to 88 | // initialize the self.config OnceLock, but it should 89 | // reuse the cached value if it is the same TlsConfig 90 | // by comparing the config_hash value. 91 | 92 | let is_cached = self 93 | .connector 94 | .get() 95 | .map(|c| c.config_hash == tls_config.hash_value()) 96 | .unwrap_or(false); 97 | 98 | if is_cached { 99 | // unwrap is ok because if is_cached is true we must have had a value. 100 | self.connector.get().unwrap().native_tls_connector.clone() 101 | } else { 102 | build_connector(tls_config)?.native_tls_connector 103 | } 104 | } else { 105 | // Initialize the connector on first run. 
106 | let connector_ref = match self.connector.get() { 107 | Some(v) => v, 108 | None => { 109 | // This is unlikely to be racy, but if it is, doesn't matter much. 110 | let c = build_connector(tls_config)?; 111 | // Maybe someone else set it first. Weird, but ok. 112 | let _ = self.connector.set(c); 113 | self.connector.get().unwrap() 114 | } 115 | }; 116 | 117 | connector_ref.native_tls_connector.clone() // cheap clone due to Arc 118 | }; 119 | 120 | Ok(connector) 121 | } 122 | } 123 | 124 | fn build_connector(tls_config: &TlsConfig) -> Result { 125 | let mut builder = TlsConnector::builder(); 126 | 127 | if tls_config.disable_verification { 128 | debug!("Certificate verification disabled"); 129 | builder.danger_accept_invalid_certs(true); 130 | builder.danger_accept_invalid_hostnames(true); 131 | } else { 132 | match &tls_config.root_certs { 133 | RootCerts::Specific(certs) => { 134 | // Only use the specific roots. 135 | builder.disable_built_in_roots(true); 136 | add_valid_der(certs.iter().map(|c| c.der()), &mut builder); 137 | } 138 | RootCerts::PlatformVerifier => { 139 | // We only use the built-in roots. 140 | builder.disable_built_in_roots(false); 141 | } 142 | RootCerts::WebPki => { 143 | // Only use the specific roots. 
144 | builder.disable_built_in_roots(true); 145 | let certs = webpki_root_certs::TLS_SERVER_ROOT_CERTS 146 | .iter() 147 | .map(|c| c.as_ref()); 148 | add_valid_der(certs, &mut builder); 149 | } 150 | } 151 | } 152 | 153 | if let Some(certs_and_key) = &tls_config.client_cert { 154 | let (certs, key) = &*certs_and_key.0; 155 | let certs_pem = certs 156 | .iter() 157 | .map(|c| pemify(c.der(), "CERTIFICATE")) 158 | .collect::>()?; 159 | 160 | let key_pem = pemify(key.der(), "PRIVATE KEY")?; 161 | 162 | debug!("Use client certficiate with key kind {:?}", key.kind()); 163 | 164 | let identity = Identity::from_pkcs8(certs_pem.as_bytes(), key_pem.as_bytes())?; 165 | builder.identity(identity); 166 | } 167 | 168 | builder.use_sni(tls_config.use_sni); 169 | 170 | if !tls_config.use_sni { 171 | debug!("Disable SNI"); 172 | } 173 | 174 | let conn = builder.build()?; 175 | 176 | let cached = CachedNativeTlsConnector { 177 | config_hash: tls_config.hash_value(), 178 | native_tls_connector: Arc::new(conn), 179 | }; 180 | 181 | Ok(cached) 182 | } 183 | 184 | fn add_valid_der<'a, C>(certs: C, builder: &mut TlsConnectorBuilder) 185 | where 186 | C: Iterator, 187 | { 188 | let mut added = 0; 189 | let mut ignored = 0; 190 | for der in certs { 191 | let c = match Certificate::from_der(der) { 192 | Ok(v) => v, 193 | Err(e) => { 194 | // Invalid/expired/broken root certs are expected 195 | // in a native root store. 
196 | trace!("Ignore invalid root cert: {}", e); 197 | ignored += 1; 198 | continue; 199 | } 200 | }; 201 | builder.add_root_certificate(c); 202 | added += 1; 203 | } 204 | debug!("Added {} and ignored {} root certs", added, ignored); 205 | } 206 | 207 | fn pemify(der: &[u8], label: &'static str) -> Result { 208 | let doc = Document::try_from(der)?; 209 | let pem = doc.to_pem(label, LineEnding::LF)?; 210 | Ok(pem) 211 | } 212 | 213 | pub struct NativeTlsTransport { 214 | buffers: LazyBuffers, 215 | stream: LazyStream, 216 | } 217 | 218 | impl Transport for NativeTlsTransport { 219 | fn buffers(&mut self) -> &mut dyn Buffers { 220 | &mut self.buffers 221 | } 222 | 223 | fn transmit_output(&mut self, amount: usize, timeout: NextTimeout) -> Result<(), Error> { 224 | let stream = self.stream.handshaken()?; 225 | stream.get_mut().set_timeout(timeout); 226 | 227 | let output = &self.buffers.output()[..amount]; 228 | stream.write_all(output)?; 229 | 230 | Ok(()) 231 | } 232 | 233 | fn await_input(&mut self, timeout: NextTimeout) -> Result { 234 | let stream = self.stream.handshaken()?; 235 | stream.get_mut().set_timeout(timeout); 236 | 237 | let input = self.buffers.input_append_buf(); 238 | let amount = stream.read(input)?; 239 | self.buffers.input_appended(amount); 240 | 241 | Ok(amount > 0) 242 | } 243 | 244 | fn is_open(&mut self) -> bool { 245 | self.stream 246 | .handshaken() 247 | .map(|c| c.get_mut().get_mut().is_open()) 248 | .unwrap_or(false) 249 | } 250 | 251 | fn is_tls(&self) -> bool { 252 | true 253 | } 254 | } 255 | 256 | /// Helper to delay the handshake until we are starting IO. 257 | /// This normalizes native-tls to behave like rustls. 
258 | enum LazyStream { 259 | Unstarted(Option<(Arc, String, TransportAdapter)>), 260 | Started(TlsStream), 261 | } 262 | 263 | impl LazyStream { 264 | fn handshaken(&mut self) -> Result<&mut TlsStream, Error> { 265 | match self { 266 | LazyStream::Unstarted(v) => { 267 | let (conn, domain, adapter) = v.take().unwrap(); 268 | let stream = conn.connect(&domain, adapter).map_err(|e| match e { 269 | HandshakeError::Failure(e) => e, 270 | HandshakeError::WouldBlock(_) => unreachable!(), 271 | })?; 272 | *self = LazyStream::Started(stream); 273 | // Next time we hit the other match arm 274 | self.handshaken() 275 | } 276 | LazyStream::Started(v) => Ok(v), 277 | } 278 | } 279 | } 280 | impl fmt::Debug for NativeTlsConnector { 281 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 282 | f.debug_struct("NativeTlsConnector").finish() 283 | } 284 | } 285 | 286 | impl fmt::Debug for NativeTlsTransport { 287 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 288 | f.debug_struct("NativeTlsTransport").finish() 289 | } 290 | } 291 | -------------------------------------------------------------------------------- /src/tls/rustls.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryInto; 2 | use std::fmt; 3 | use std::io::{Read, Write}; 4 | use std::sync::{Arc, OnceLock}; 5 | 6 | use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}; 7 | use rustls::crypto::CryptoProvider; 8 | use rustls::{ClientConfig, ClientConnection, RootCertStore, StreamOwned, ALL_VERSIONS}; 9 | use rustls_pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs1KeyDer, PrivatePkcs8KeyDer}; 10 | use rustls_pki_types::{PrivateSec1KeyDer, ServerName}; 11 | 12 | use crate::tls::cert::KeyKind; 13 | use crate::tls::{RootCerts, TlsProvider}; 14 | use crate::transport::{Buffers, ConnectionDetails, Connector, LazyBuffers}; 15 | use crate::transport::{Either, NextTimeout, Transport, TransportAdapter}; 16 
| use crate::Error; 17 | 18 | use super::TlsConfig; 19 | 20 | /// Wrapper for TLS using rustls. 21 | /// 22 | /// Requires feature flag **rustls**. 23 | #[derive(Default)] 24 | pub struct RustlsConnector { 25 | config: OnceLock, 26 | } 27 | 28 | struct CachedRustlConfig { 29 | config_hash: u64, 30 | rustls_config: Arc, 31 | } 32 | 33 | impl Connector for RustlsConnector { 34 | type Out = Either; 35 | 36 | fn connect( 37 | &self, 38 | details: &ConnectionDetails, 39 | chained: Option, 40 | ) -> Result, Error> { 41 | let Some(transport) = chained else { 42 | panic!("RustlConnector requires a chained transport"); 43 | }; 44 | 45 | // Only add TLS if we are connecting via HTTPS and the transport isn't TLS 46 | // already, otherwise use chained transport as is. 47 | if !details.needs_tls() || transport.is_tls() { 48 | trace!("Skip"); 49 | return Ok(Some(Either::A(transport))); 50 | } 51 | 52 | if details.config.tls_config().provider != TlsProvider::Rustls { 53 | debug!("Skip because config is not set to Rustls"); 54 | return Ok(Some(Either::A(transport))); 55 | } 56 | 57 | trace!("Try wrap in TLS"); 58 | 59 | let config = self.get_cached_config(details); 60 | 61 | let name_borrowed: ServerName<'_> = details 62 | .uri 63 | .authority() 64 | .expect("uri authority for tls") 65 | .host() 66 | .try_into() 67 | .map_err(|e| { 68 | debug!("rustls invalid dns name: {}", e); 69 | Error::Tls("Rustls invalid dns name error") 70 | })?; 71 | 72 | let name = name_borrowed.to_owned(); 73 | 74 | let conn = ClientConnection::new(config, name)?; 75 | let stream = StreamOwned { 76 | conn, 77 | sock: TransportAdapter::new(transport.boxed()), 78 | }; 79 | 80 | let buffers = LazyBuffers::new( 81 | details.config.input_buffer_size(), 82 | details.config.output_buffer_size(), 83 | ); 84 | 85 | let transport = RustlsTransport { buffers, stream }; 86 | 87 | debug!("Wrapped TLS"); 88 | 89 | Ok(Some(Either::B(transport))) 90 | } 91 | } 92 | 93 | impl RustlsConnector { 94 | fn 
get_cached_config(&self, details: &ConnectionDetails) -> Arc { 95 | let tls_config = details.config.tls_config(); 96 | 97 | if details.request_level { 98 | // If the TlsConfig is request level, it is not allowed to 99 | // initialize the self.config OnceLock, but it should 100 | // reuse the cached value if it is the same TlsConfig 101 | // by comparing the config_hash value. 102 | 103 | let is_cached = self 104 | .config 105 | .get() 106 | .map(|c| c.config_hash == tls_config.hash_value()) 107 | .unwrap_or(false); 108 | 109 | if is_cached { 110 | // unwrap is ok because if is_cached is true we must have had a value. 111 | self.config.get().unwrap().rustls_config.clone() 112 | } else { 113 | build_config(tls_config).rustls_config 114 | } 115 | } else { 116 | // On agent level, we initialize the config on first run. This is 117 | // the value we want to cache. 118 | let config_ref = self.config.get_or_init(|| build_config(tls_config)); 119 | 120 | config_ref.rustls_config.clone() 121 | } 122 | } 123 | } 124 | 125 | fn build_config(tls_config: &TlsConfig) -> CachedRustlConfig { 126 | // 1. Prefer provider set by TlsConfig. 127 | // 2. Use process wide default set in rustls library. 128 | // 3. Pick ring, if it is enabled (the default behavior). 129 | // 4. Error (never pick up a default from feature flags alone). 130 | let provider = tls_config 131 | .rustls_crypto_provider 132 | .clone() 133 | .or(rustls::crypto::CryptoProvider::get_default().cloned()) 134 | .unwrap_or_else(ring_if_enabled); 135 | 136 | #[cfg(feature = "_ring")] 137 | fn ring_if_enabled() -> Arc { 138 | Arc::new(rustls::crypto::ring::default_provider()) 139 | } 140 | 141 | #[cfg(not(feature = "_ring"))] 142 | fn ring_if_enabled() -> Arc { 143 | panic!( 144 | "No CryptoProvider for Rustls. 
Either enable feature `rustls`, or set process 145 | default using CryptoProvider::set_default(), or configure 146 | TlsConfig::rustls_crypto_provider()" 147 | ); 148 | } 149 | 150 | let builder = ClientConfig::builder_with_provider(provider.clone()) 151 | .with_protocol_versions(ALL_VERSIONS) 152 | .expect("all TLS versions"); 153 | 154 | let builder = if tls_config.disable_verification { 155 | debug!("Certificate verification disabled"); 156 | builder 157 | .dangerous() 158 | .with_custom_certificate_verifier(Arc::new(DisabledVerifier)) 159 | } else { 160 | match &tls_config.root_certs { 161 | RootCerts::Specific(certs) => { 162 | let root_certs = certs.iter().map(|c| CertificateDer::from(c.der())); 163 | 164 | let mut root_store = RootCertStore::empty(); 165 | let (added, ignored) = root_store.add_parsable_certificates(root_certs); 166 | debug!("Added {} and ignored {} root certs", added, ignored); 167 | 168 | builder.with_root_certificates(root_store) 169 | } 170 | #[cfg(not(feature = "platform-verifier"))] 171 | RootCerts::PlatformVerifier => { 172 | panic!("Rustls + PlatformVerifier requires feature: platform-verifier"); 173 | } 174 | #[cfg(feature = "platform-verifier")] 175 | RootCerts::PlatformVerifier => builder 176 | // This actually not dangerous. The rustls_platform_verifier is safe. 
177 | .dangerous() 178 | .with_custom_certificate_verifier(Arc::new( 179 | rustls_platform_verifier::Verifier::new().with_provider(provider), 180 | )), 181 | RootCerts::WebPki => { 182 | let root_store = RootCertStore { 183 | roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), 184 | }; 185 | builder.with_root_certificates(root_store) 186 | } 187 | } 188 | }; 189 | 190 | let mut config = if let Some(certs_and_key) = &tls_config.client_cert { 191 | let cert_chain = certs_and_key 192 | .certs() 193 | .iter() 194 | .map(|c| CertificateDer::from(c.der()).into_owned()); 195 | 196 | let key = certs_and_key.private_key(); 197 | 198 | let key_der = match key.kind() { 199 | KeyKind::Pkcs1 => PrivateKeyDer::Pkcs1(PrivatePkcs1KeyDer::from(key.der())), 200 | KeyKind::Pkcs8 => PrivateKeyDer::Pkcs8(PrivatePkcs8KeyDer::from(key.der())), 201 | KeyKind::Sec1 => PrivateKeyDer::Sec1(PrivateSec1KeyDer::from(key.der())), 202 | } 203 | .clone_key(); 204 | debug!("Use client certficiate with key kind {:?}", key.kind()); 205 | 206 | builder 207 | .with_client_auth_cert(cert_chain.collect(), key_der) 208 | .expect("valid client auth certificate") 209 | } else { 210 | builder.with_no_client_auth() 211 | }; 212 | 213 | config.enable_sni = tls_config.use_sni; 214 | 215 | if !tls_config.use_sni { 216 | debug!("Disable SNI"); 217 | } 218 | 219 | CachedRustlConfig { 220 | config_hash: tls_config.hash_value(), 221 | rustls_config: Arc::new(config), 222 | } 223 | } 224 | 225 | pub struct RustlsTransport { 226 | buffers: LazyBuffers, 227 | stream: StreamOwned, 228 | } 229 | 230 | impl Transport for RustlsTransport { 231 | fn buffers(&mut self) -> &mut dyn Buffers { 232 | &mut self.buffers 233 | } 234 | 235 | fn transmit_output(&mut self, amount: usize, timeout: NextTimeout) -> Result<(), Error> { 236 | self.stream.get_mut().set_timeout(timeout); 237 | 238 | let output = &self.buffers.output()[..amount]; 239 | self.stream.write_all(output)?; 240 | 241 | Ok(()) 242 | } 243 | 244 | fn await_input(&mut 
self, timeout: NextTimeout) -> Result { 245 | self.stream.get_mut().set_timeout(timeout); 246 | 247 | let input = self.buffers.input_append_buf(); 248 | let amount = self.stream.read(input)?; 249 | self.buffers.input_appended(amount); 250 | 251 | Ok(amount > 0) 252 | } 253 | 254 | fn is_open(&mut self) -> bool { 255 | self.stream.get_mut().get_mut().is_open() 256 | } 257 | 258 | fn is_tls(&self) -> bool { 259 | true 260 | } 261 | } 262 | 263 | #[derive(Debug)] 264 | struct DisabledVerifier; 265 | 266 | impl ServerCertVerifier for DisabledVerifier { 267 | fn verify_server_cert( 268 | &self, 269 | _end_entity: &CertificateDer<'_>, 270 | _intermediates: &[CertificateDer<'_>], 271 | _server_name: &rustls_pki_types::ServerName<'_>, 272 | _ocsp_response: &[u8], 273 | _now: rustls_pki_types::UnixTime, 274 | ) -> Result { 275 | Ok(ServerCertVerified::assertion()) 276 | } 277 | 278 | fn verify_tls12_signature( 279 | &self, 280 | _message: &[u8], 281 | _cert: &CertificateDer<'_>, 282 | _dss: &rustls::DigitallySignedStruct, 283 | ) -> Result { 284 | Ok(HandshakeSignatureValid::assertion()) 285 | } 286 | 287 | fn verify_tls13_signature( 288 | &self, 289 | _message: &[u8], 290 | _cert: &CertificateDer<'_>, 291 | _dss: &rustls::DigitallySignedStruct, 292 | ) -> Result { 293 | Ok(HandshakeSignatureValid::assertion()) 294 | } 295 | 296 | fn supported_verify_schemes(&self) -> Vec { 297 | vec![ 298 | rustls::SignatureScheme::RSA_PKCS1_SHA1, 299 | rustls::SignatureScheme::RSA_PKCS1_SHA256, 300 | rustls::SignatureScheme::RSA_PKCS1_SHA384, 301 | rustls::SignatureScheme::RSA_PKCS1_SHA512, 302 | rustls::SignatureScheme::ECDSA_NISTP256_SHA256, 303 | rustls::SignatureScheme::ECDSA_NISTP384_SHA384, 304 | rustls::SignatureScheme::ECDSA_NISTP521_SHA512, 305 | rustls::SignatureScheme::RSA_PSS_SHA256, 306 | rustls::SignatureScheme::RSA_PSS_SHA384, 307 | rustls::SignatureScheme::RSA_PSS_SHA512, 308 | rustls::SignatureScheme::ED25519, 309 | rustls::SignatureScheme::ED448, 310 | ] 311 | } 312 | } 
313 | 314 | impl fmt::Debug for RustlsConnector { 315 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 316 | f.debug_struct("RustlsConnector").finish() 317 | } 318 | } 319 | 320 | impl fmt::Debug for RustlsTransport { 321 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 322 | f.debug_struct("RustlsTransport") 323 | .field("chained", &self.stream.sock.inner()) 324 | .finish() 325 | } 326 | } 327 | -------------------------------------------------------------------------------- /src/unversioned/mod.rs: -------------------------------------------------------------------------------- 1 | //! API that does not (yet) follow semver. 2 | //! 3 | //! All public types under `unversioned` are available to use, but are not considered final 4 | //! API in the semver sense. Breaking changes to anything under the module `unversioned`, 5 | //! like `Transport` or `Resolver` will NOT be reflected in a major version bump of the 6 | //! `ureq` crate. We do however commit to only make such changes in *minor* version bumps, 7 | //! not patch. 8 | //! 9 | //! In time, we will move these types out of `unversioned` and solidify the API. There 10 | //! is no set timeline for this. 11 | 12 | pub mod resolver; 13 | pub mod transport; 14 | -------------------------------------------------------------------------------- /src/unversioned/resolver.rs: -------------------------------------------------------------------------------- 1 | //! Name resolvers. 2 | //! 3 | //! **NOTE resolver does not (yet) [follow semver][super].** 4 | //! 5 | //! _NOTE: Resolver is deep configuration of ureq and is not required for regular use._ 6 | //! 7 | //! Name resolving is pluggable. The resolver's duty is to take a URI and translate it 8 | //! to a socket address (IP + port). This is done as a separate step in regular ureq use. 9 | //! The hostname is looked up and provided to the [`Connector`](crate::transport::Connector). 10 | //! 11 | //! 
In some situations it might be desirable to not do this lookup, or to use another system 12 | //! than DNS for it. 13 | use std::fmt::{self, Debug}; 14 | use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4, ToSocketAddrs}; 15 | use std::sync::mpsc::{self, RecvTimeoutError}; 16 | use std::thread::{self}; 17 | use std::vec::IntoIter; 18 | 19 | use http::uri::{Authority, Scheme}; 20 | use http::Uri; 21 | 22 | use crate::config::Config; 23 | use crate::http; 24 | use crate::transport::NextTimeout; 25 | use crate::util::{SchemeExt, UriExt}; 26 | use crate::Error; 27 | 28 | /// Trait for name resolvers. 29 | pub trait Resolver: Debug + Send + Sync + 'static { 30 | /// Resolve the URI to a socket address. 31 | /// 32 | /// The implementation should resolve within the given _timeout_. 33 | fn resolve( 34 | &self, 35 | uri: &Uri, 36 | config: &Config, 37 | timeout: NextTimeout, 38 | ) -> Result; 39 | } 40 | 41 | /// Max number of socket addresses to keep from the resolver. 42 | const MAX_ADDRS: usize = 16; 43 | 44 | pub use ureq_proto::ArrayVec; 45 | 46 | /// Addresses as returned by the resolver. 47 | pub type ResolvedSocketAddrs = ArrayVec; 48 | 49 | /// Default resolver implementation. 50 | /// 51 | /// Uses std::net [`ToSocketAddrs`](https://doc.rust-lang.org/std/net/trait.ToSocketAddrs.html) to 52 | /// do the lookup. Can optionally spawn a thread to abort lookup if the relevant timeout is set. 53 | #[derive(Default)] 54 | pub struct DefaultResolver { 55 | _private: (), 56 | } 57 | 58 | impl DefaultResolver { 59 | /// Helper to combine scheme host and port to a single string. 60 | /// 61 | /// This knows about the default ports for http, https and socks proxies which 62 | /// can then be omitted from the `Authority`. 
63 | pub fn host_and_port(scheme: &Scheme, authority: &Authority) -> Option { 64 | let port = authority.port_u16().or_else(|| scheme.default_port())?; 65 | 66 | Some(format!("{}:{}", authority.host(), port)) 67 | } 68 | } 69 | 70 | impl Resolver for DefaultResolver { 71 | fn resolve( 72 | &self, 73 | uri: &Uri, 74 | config: &Config, 75 | timeout: NextTimeout, 76 | ) -> Result { 77 | uri.ensure_valid_url()?; 78 | 79 | // unwrap is ok due to ensure_full_url() above. 80 | let scheme = uri.scheme().unwrap(); 81 | let authority = uri.authority().unwrap(); 82 | 83 | if cfg!(feature = "_test") { 84 | let mut v = ArrayVec::from_fn(|_| "0.0.0.0:1".parse().unwrap()); 85 | v.push(SocketAddr::V4(SocketAddrV4::new( 86 | Ipv4Addr::new(10, 0, 0, 1), 87 | authority 88 | .port_u16() 89 | .or_else(|| scheme.default_port()) 90 | // unwrap is ok because ensure_valid_url() above. 91 | .unwrap(), 92 | ))); 93 | return Ok(v); 94 | } 95 | 96 | // This will be on the form "myspecialhost.org:1234". The port is mandatory. 97 | // unwrap is ok because ensure_valid_url() above. 98 | let addr = DefaultResolver::host_and_port(scheme, authority).unwrap(); 99 | 100 | // Determine if we want to use the async behavior. 101 | let use_sync = timeout.after.is_not_happening(); 102 | 103 | let iter = if use_sync { 104 | trace!("Resolve: {}", addr); 105 | // When timeout is not set, we do not spawn any threads. 106 | addr.to_socket_addrs()? 107 | } else { 108 | trace!("Resolve with timeout ({:?}): {} ", timeout, addr); 109 | resolve_async(addr, timeout)? 
110 | }; 111 | 112 | let ip_family = config.ip_family(); 113 | let wanted = ip_family.keep_wanted(iter); 114 | 115 | fn uninited_socketaddr() -> SocketAddr { 116 | SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0) 117 | } 118 | 119 | let mut result: ResolvedSocketAddrs = ArrayVec::from_fn(|_| uninited_socketaddr()); 120 | for addr in wanted.take(MAX_ADDRS) { 121 | result.push(addr); 122 | } 123 | 124 | debug!("Resolved: {:?}", result); 125 | 126 | if result.is_empty() { 127 | Err(Error::HostNotFound) 128 | } else { 129 | Ok(result) 130 | } 131 | } 132 | } 133 | 134 | fn resolve_async(addr: String, timeout: NextTimeout) -> Result, Error> { 135 | // TODO(martin): On Linux we have getaddrinfo_a which is a libc async way of 136 | // doing host lookup. We should make a subcrate that uses a native async method 137 | // when possible, and otherwise fall back on this thread behavior. 138 | let (tx, rx) = mpsc::sync_channel(1); 139 | thread::spawn(move || tx.send(addr.to_socket_addrs()).ok()); 140 | 141 | match rx.recv_timeout(*timeout.after) { 142 | Ok(v) => Ok(v?), 143 | Err(c) => match c { 144 | // Timeout results in None 145 | RecvTimeoutError::Timeout => Err(Error::Timeout(timeout.reason)), 146 | // The sender going away is nonsensical. Did the thread just die? 
147 | RecvTimeoutError::Disconnected => unreachable!("mpsc sender gone"), 148 | }, 149 | } 150 | } 151 | 152 | impl fmt::Debug for DefaultResolver { 153 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 154 | f.debug_struct("DefaultResolver").finish() 155 | } 156 | } 157 | 158 | #[cfg(test)] 159 | mod test { 160 | use crate::transport::time::Duration; 161 | 162 | use super::*; 163 | 164 | #[test] 165 | fn unknown_scheme() { 166 | let uri: Uri = "foo://some:42/123".parse().unwrap(); 167 | let config = Config::default(); 168 | let err = DefaultResolver::default() 169 | .resolve( 170 | &uri, 171 | &config, 172 | NextTimeout { 173 | after: Duration::NotHappening, 174 | reason: crate::Timeout::Global, 175 | }, 176 | ) 177 | .unwrap_err(); 178 | assert!(matches!(err, Error::BadUri(_))); 179 | assert_eq!(err.to_string(), "bad uri: unknown scheme: foo"); 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/unversioned/transport/buf.rs: -------------------------------------------------------------------------------- 1 | use crate::util::ConsumeBuf; 2 | 3 | /// Abstraction over input/output buffers. 4 | /// 5 | /// In ureq, the buffers are provided by the [`Transport`](crate::transport::Transport). 6 | pub trait Buffers { 7 | /// Mut handle to output buffers to write new data. Data is always 8 | /// written from `0..`. 9 | fn output(&mut self) -> &mut [u8]; 10 | 11 | /// Unconsumed bytes in the input buffer as read only. 12 | /// 13 | /// The input buffer is written to by using [`Buffers::input_append_buf`] followed by 14 | /// [`Buffers::input_appended`] to indiciate how many additional bytes were added to the 15 | /// input. 16 | /// 17 | /// This buffer should return the total unconsumed bytes. 18 | /// 19 | /// Example: if the internal buffer is `input: Vec`, and we have counters for 20 | /// `filled: usize` and `consumed: usize`. This returns `&input[consumed..filled]`. 
21 | fn input(&self) -> &[u8]; 22 | 23 | /// Input buffer to write to. This can be called despite there being unconsumed bytes 24 | /// left in the buffer already. 25 | /// 26 | /// Example: if the internal buffer is `input: Vec`, and we have counters for 27 | /// `filled: usize` and `consumed: usize`. This returns `&mut input[filled..]`. 28 | fn input_append_buf(&mut self) -> &mut [u8]; 29 | 30 | /// Add a number of read bytes into [`Buffers::input_append_buf()`]. 31 | /// 32 | /// Example: if the internal buffer is `input: Vec`, and we have counters for 33 | /// `filled: usize` and `consumed: usize`, this increases `filled`. 34 | fn input_appended(&mut self, amount: usize); 35 | 36 | /// Consume a number of bytes from `&input`. 37 | /// 38 | /// Example: if the internal buffer is `input: Vec`, and we have counters for 39 | /// `filled: usize` and `consumed: usize`, this increases `consumed`. 40 | fn input_consume(&mut self, amount: usize); 41 | 42 | /// Helper to get a scratch buffer (`tmp`) and the output buffer. This is used when 43 | /// sending the request body in which case we use a `Read` trait to read from the 44 | /// [`SendBody`](crate::SendBody) into tmp and then write it to the output buffer. 45 | fn tmp_and_output(&mut self) -> (&mut [u8], &mut [u8]); 46 | 47 | /// Helper to determine if the `&input` already holds unconsumed data or we need to 48 | /// read more input from the transport. This indicates two things: 49 | /// 50 | /// 1. There is unconsumed data in the input buffer 51 | /// 2. The last call to consume was > 0. 52 | /// 53 | /// Step 2 is because the input buffer might contain half a response body, and we 54 | /// cannot parse it until we got the entire buffer. In this case the transport must 55 | /// read more data first. 56 | fn can_use_input(&self) -> bool; 57 | } 58 | 59 | /// Default buffer implementation. 60 | /// 61 | /// The buffers are lazy such that no allocations are made until needed. 
That means 62 | /// a [`Transport`](crate::transport::Transport) implementation can freely instantiate 63 | /// the `LazyBuffers`. 64 | #[derive(Debug)] 65 | pub struct LazyBuffers { 66 | input_size: usize, 67 | output_size: usize, 68 | 69 | input: ConsumeBuf, 70 | output: Vec, 71 | 72 | progress: bool, 73 | } 74 | 75 | impl LazyBuffers { 76 | /// Create a new buffer. 77 | /// 78 | /// The sizes provided are not allocated until we need to. 79 | pub fn new(input_size: usize, output_size: usize) -> Self { 80 | assert!(input_size > 0); 81 | assert!(output_size > 0); 82 | 83 | LazyBuffers { 84 | input_size, 85 | output_size, 86 | 87 | // Vectors don't allocate until they get a size. 88 | input: ConsumeBuf::new(0), 89 | output: vec![], 90 | 91 | progress: false, 92 | } 93 | } 94 | 95 | fn ensure_allocation(&mut self) { 96 | if self.output.len() < self.output_size { 97 | self.output.resize(self.output_size, 0); 98 | } 99 | if self.input.unconsumed().len() < self.input_size { 100 | self.input.resize(self.input_size); 101 | } 102 | } 103 | } 104 | 105 | impl Buffers for LazyBuffers { 106 | fn output(&mut self) -> &mut [u8] { 107 | self.ensure_allocation(); 108 | &mut self.output 109 | } 110 | 111 | fn input(&self) -> &[u8] { 112 | self.input.unconsumed() 113 | } 114 | 115 | fn input_append_buf(&mut self) -> &mut [u8] { 116 | self.ensure_allocation(); 117 | self.input.free_mut() 118 | } 119 | 120 | fn tmp_and_output(&mut self) -> (&mut [u8], &mut [u8]) { 121 | self.ensure_allocation(); 122 | const MIN_TMP_SIZE: usize = 10 * 1024; 123 | 124 | let tmp_available = self.input.free_mut().len(); 125 | 126 | if tmp_available < MIN_TMP_SIZE { 127 | // The tmp space is used for reading the request body from the 128 | // Body as a Read. There's an outside chance there isn't any space 129 | // left in the input buffer if we have done Await100 and the peer 130 | // started sending a ton of data before we asked for it. 
131 | // It's a pathological situation that we don't need to make work well. 132 | let needed = MIN_TMP_SIZE - tmp_available; 133 | self.input.resize(self.input.unconsumed().len() + needed); 134 | } 135 | 136 | (self.input.free_mut(), &mut self.output) 137 | } 138 | 139 | fn input_appended(&mut self, amount: usize) { 140 | self.input.add_filled(amount); 141 | } 142 | 143 | fn input_consume(&mut self, amount: usize) { 144 | self.progress = amount > 0; 145 | self.input.consume(amount); 146 | } 147 | 148 | fn can_use_input(&self) -> bool { 149 | !self.input.unconsumed().is_empty() && self.progress 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/unversioned/transport/chain.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::marker::PhantomData; 3 | 4 | use super::{Connector, Transport}; 5 | 6 | /// Two chained connectors called one after another. 7 | /// 8 | /// Created by calling [`Connector::chain`] on the first connector. 
9 | pub struct ChainedConnector(First, Second, PhantomData); 10 | 11 | impl Connector for ChainedConnector 12 | where 13 | In: Transport, 14 | First: Connector, 15 | Second: Connector, 16 | { 17 | type Out = Second::Out; 18 | 19 | fn connect( 20 | &self, 21 | details: &super::ConnectionDetails, 22 | chained: Option, 23 | ) -> Result, crate::Error> { 24 | let f_out = self.0.connect(details, chained)?; 25 | self.1.connect(details, f_out) 26 | } 27 | } 28 | 29 | impl ChainedConnector { 30 | pub(crate) fn new(first: First, second: Second) -> Self { 31 | ChainedConnector(first, second, PhantomData) 32 | } 33 | } 34 | 35 | impl fmt::Debug for ChainedConnector 36 | where 37 | In: Transport, 38 | First: Connector, 39 | Second: Connector, 40 | { 41 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 42 | f.debug_tuple("ChainedConnector") 43 | .field(&self.0) 44 | .field(&self.1) 45 | .finish() 46 | } 47 | } 48 | 49 | impl Clone for ChainedConnector 50 | where 51 | In: Transport, 52 | First: Connector + Clone, 53 | Second: Connector + Clone, 54 | { 55 | fn clone(&self) -> Self { 56 | ChainedConnector(self.0.clone(), self.1.clone(), PhantomData) 57 | } 58 | } 59 | 60 | /// A selection between two transports. 61 | #[derive(Debug)] 62 | pub enum Either { 63 | /// The first transport. 64 | A(A), 65 | /// The second transport. 
66 | B(B), 67 | } 68 | 69 | impl Transport for Either { 70 | fn buffers(&mut self) -> &mut dyn super::Buffers { 71 | match self { 72 | Either::A(a) => a.buffers(), 73 | Either::B(b) => b.buffers(), 74 | } 75 | } 76 | 77 | fn transmit_output( 78 | &mut self, 79 | amount: usize, 80 | timeout: super::NextTimeout, 81 | ) -> Result<(), crate::Error> { 82 | match self { 83 | Either::A(a) => a.transmit_output(amount, timeout), 84 | Either::B(b) => b.transmit_output(amount, timeout), 85 | } 86 | } 87 | 88 | fn await_input(&mut self, timeout: super::NextTimeout) -> Result { 89 | match self { 90 | Either::A(a) => a.await_input(timeout), 91 | Either::B(b) => b.await_input(timeout), 92 | } 93 | } 94 | 95 | fn is_open(&mut self) -> bool { 96 | match self { 97 | Either::A(a) => a.is_open(), 98 | Either::B(b) => b.is_open(), 99 | } 100 | } 101 | 102 | fn is_tls(&self) -> bool { 103 | match self { 104 | Either::A(a) => a.is_tls(), 105 | Either::B(b) => b.is_tls(), 106 | } 107 | } 108 | } 109 | 110 | // Connector is implemented for () to start a chain of connectors. 111 | // 112 | // The `Out` transport is supposedly `()`, but this is never instantiated. 113 | impl Connector<()> for () { 114 | type Out = (); 115 | 116 | fn connect( 117 | &self, 118 | _: &super::ConnectionDetails, 119 | _: Option<()>, 120 | ) -> Result, crate::Error> { 121 | Ok(None) 122 | } 123 | } 124 | 125 | // () is a valid Transport for type reasons. 126 | // 127 | // It should never be instantiated as an actual transport. 
128 | impl Transport for () { 129 | fn buffers(&mut self) -> &mut dyn super::Buffers { 130 | panic!("Unit transport is not valid") 131 | } 132 | 133 | fn transmit_output(&mut self, _: usize, _: super::NextTimeout) -> Result<(), crate::Error> { 134 | panic!("Unit transport is not valid") 135 | } 136 | 137 | fn await_input(&mut self, _: super::NextTimeout) -> Result { 138 | panic!("Unit transport is not valid") 139 | } 140 | 141 | fn is_open(&mut self) -> bool { 142 | panic!("Unit transport is not valid") 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/unversioned/transport/io.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use crate::Timeout; 4 | 5 | use super::time::Duration; 6 | use super::{NextTimeout, Transport}; 7 | 8 | /// Helper to turn a [`Transport`] into a std::io [`Read`](io::Read) and [`Write`](io::Write). 9 | /// 10 | /// This is useful when integrating with components that expect a regular `Read`/`Write`. In 11 | /// ureq this is used both for the [`RustlsConnector`](crate::unversioned::transport::RustlsConnector) and the 12 | /// [`NativeTlsConnector`](crate::unversioned::transport::NativeTlsConnector). 13 | pub struct TransportAdapter> { 14 | timeout: NextTimeout, 15 | transport: T, 16 | } 17 | 18 | impl TransportAdapter { 19 | /// Creates a new adapter 20 | pub fn new(transport: T) -> Self { 21 | Self { 22 | timeout: NextTimeout { 23 | after: Duration::NotHappening, 24 | reason: Timeout::Global, 25 | }, 26 | transport, 27 | } 28 | } 29 | 30 | /// Set a new value of the timeout. 
31 | pub fn set_timeout(&mut self, timeout: NextTimeout) { 32 | self.timeout = timeout; 33 | } 34 | 35 | /// Reference to the adapted transport 36 | pub fn get_ref(&self) -> &dyn Transport { 37 | &self.transport 38 | } 39 | 40 | /// Mut reference to the adapted transport 41 | pub fn get_mut(&mut self) -> &mut dyn Transport { 42 | &mut self.transport 43 | } 44 | 45 | /// Reference to the inner transport. 46 | pub fn inner(&self) -> &dyn Transport { 47 | &self.transport 48 | } 49 | 50 | /// Turn the adapter back into the wrapped transport 51 | pub fn into_inner(self) -> T { 52 | self.transport 53 | } 54 | } 55 | 56 | impl io::Read for TransportAdapter { 57 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 58 | self.transport 59 | .maybe_await_input(self.timeout) 60 | .map_err(|e| e.into_io())?; 61 | let input = self.transport.buffers().input(); 62 | 63 | let max = buf.len().min(input.len()); 64 | buf[..max].copy_from_slice(&input[..max]); 65 | self.transport.buffers().input_consume(max); 66 | 67 | Ok(max) 68 | } 69 | } 70 | 71 | impl io::Write for TransportAdapter { 72 | fn write(&mut self, buf: &[u8]) -> io::Result { 73 | let output = self.transport.buffers().output(); 74 | 75 | let max = buf.len().min(output.len()); 76 | output[..max].copy_from_slice(&buf[..max]); 77 | self.transport 78 | .transmit_output(max, self.timeout) 79 | .map_err(|e| e.into_io())?; 80 | 81 | Ok(max) 82 | } 83 | 84 | fn flush(&mut self) -> io::Result<()> { 85 | Ok(()) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/unversioned/transport/socks.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::net::{SocketAddr, TcpStream}; 3 | use std::sync::mpsc::{self, RecvTimeoutError}; 4 | use std::{io, thread}; 5 | 6 | use socks::{Socks4Stream, Socks5Stream}; 7 | 8 | use crate::proxy::{Proto, Proxy}; 9 | use crate::Error; 10 | 11 | use super::chain::Either; 12 | use 
super::ResolvedSocketAddrs; 13 | 14 | use super::tcp::TcpTransport; 15 | use super::{ConnectionDetails, Connector, LazyBuffers, NextTimeout, Transport}; 16 | 17 | /// Connector for SOCKS proxies. 18 | /// 19 | /// Requires the **socks-proxy** feature. 20 | /// 21 | /// The connector looks at the proxy settings in [`proxy`](crate::config::ConfigBuilder::proxy) to 22 | /// determine whether to attempt a proxy connection or not. 23 | #[derive(Default)] 24 | pub struct SocksConnector(()); 25 | 26 | impl Connector for SocksConnector { 27 | type Out = Either; 28 | 29 | fn connect( 30 | &self, 31 | details: &ConnectionDetails, 32 | chained: Option, 33 | ) -> Result, Error> { 34 | let proxy = match details.config.proxy() { 35 | Some(v) if v.proto().is_socks() => v, 36 | // If there is no proxy configured, or it isn't a SOCKS proxy, use whatever is chained. 37 | _ => { 38 | trace!("SOCKS not configured"); 39 | return Ok(chained.map(Either::A)); 40 | } 41 | }; 42 | 43 | if chained.is_some() { 44 | trace!("Skip"); 45 | return Ok(chained.map(Either::A)); 46 | } 47 | 48 | let proxy_addrs = details 49 | .resolver 50 | .resolve(proxy.uri(), details.config, details.timeout)?; 51 | 52 | let stream = try_connect(&proxy_addrs, &details.addrs, proxy, details.timeout)?; 53 | 54 | if details.config.no_delay() { 55 | stream.set_nodelay(true)?; 56 | } 57 | 58 | let buffers = LazyBuffers::new( 59 | details.config.input_buffer_size(), 60 | details.config.output_buffer_size(), 61 | ); 62 | let transport = TcpTransport::new(stream, buffers); 63 | 64 | Ok(Some(Either::B(transport))) 65 | } 66 | } 67 | 68 | fn try_connect( 69 | proxy_addrs: &ResolvedSocketAddrs, 70 | target_addrs: &ResolvedSocketAddrs, 71 | proxy: &Proxy, 72 | timeout: NextTimeout, 73 | ) -> Result { 74 | for target_addr in target_addrs { 75 | for proxy_addr in proxy_addrs { 76 | trace!( 77 | "Try connect {} {} -> {}", 78 | proxy.proto(), 79 | proxy_addr, 80 | target_addr 81 | ); 82 | 83 | match try_connect_single(*proxy_addr, 
*target_addr, proxy, timeout) { 84 | Ok(v) => { 85 | debug!( 86 | "{} connected {} -> {}", 87 | proxy.proto(), 88 | proxy_addr, 89 | target_addr 90 | ); 91 | return Ok(v); 92 | } 93 | // Intercept ConnectionRefused to try next addrs 94 | Err(Error::Io(e)) if e.kind() == io::ErrorKind::ConnectionRefused => { 95 | trace!("{} -> {} proxy connection refused", proxy_addr, target_addr); 96 | continue; 97 | } 98 | // Other errors bail 99 | Err(e) => return Err(e), 100 | } 101 | } 102 | } 103 | 104 | debug!("Proxy failed to to connect to any resolved address"); 105 | Err(Error::Io(io::Error::new( 106 | io::ErrorKind::ConnectionRefused, 107 | "Connection refused", 108 | ))) 109 | } 110 | 111 | fn try_connect_single( 112 | proxy_addr: SocketAddr, 113 | target_addr: SocketAddr, 114 | proxy: &Proxy, 115 | timeout: NextTimeout, 116 | ) -> Result { 117 | // The async behavior is only used if we want to time cap connecting. 118 | let use_sync = timeout.after.is_not_happening(); 119 | 120 | if use_sync { 121 | connect_proxy(proxy, proxy_addr, target_addr) 122 | } else { 123 | let (tx, rx) = mpsc::sync_channel(1); 124 | let proxy = proxy.clone(); 125 | 126 | thread::spawn(move || tx.send(connect_proxy(&proxy, proxy_addr, target_addr))); 127 | 128 | match rx.recv_timeout(*timeout.after) { 129 | Ok(v) => v, 130 | Err(RecvTimeoutError::Timeout) => Err(Error::Timeout(timeout.reason)), 131 | Err(RecvTimeoutError::Disconnected) => unreachable!("mpsc sender gone"), 132 | } 133 | } 134 | } 135 | 136 | fn connect_proxy( 137 | proxy: &Proxy, 138 | proxy_addr: SocketAddr, 139 | target_addr: SocketAddr, 140 | ) -> Result { 141 | let stream = match proxy.proto() { 142 | Proto::Socks4 | Proto::Socks4A => { 143 | if proxy.username().is_some() { 144 | debug!("SOCKS4 does not support username/password"); 145 | } 146 | 147 | Socks4Stream::connect(proxy_addr, target_addr, "")?.into_inner() 148 | } 149 | Proto::Socks5 => { 150 | if let Some(username) = proxy.username() { 151 | // Connect with 
authentication. 152 | let password = proxy.password().unwrap_or(""); 153 | 154 | Socks5Stream::connect_with_password(proxy_addr, target_addr, username, password)? 155 | } else { 156 | Socks5Stream::connect(proxy_addr, target_addr)? 157 | } 158 | .into_inner() 159 | } 160 | _ => unreachable!(), // HTTP(s) proxies. 161 | }; 162 | 163 | Ok(stream) 164 | } 165 | 166 | impl fmt::Debug for SocksConnector { 167 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 168 | f.debug_struct("SocksConnector").finish() 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/unversioned/transport/tcp.rs: -------------------------------------------------------------------------------- 1 | use std::io::{Read, Write}; 2 | use std::net::{SocketAddr, TcpStream}; 3 | use std::{fmt, io, time}; 4 | 5 | use crate::config::Config; 6 | use crate::util::IoResultExt; 7 | use crate::Error; 8 | 9 | use super::chain::Either; 10 | use super::ResolvedSocketAddrs; 11 | 12 | use super::time::Duration; 13 | use super::{Buffers, ConnectionDetails, Connector, LazyBuffers, NextTimeout, Transport}; 14 | 15 | #[derive(Default)] 16 | /// Connector for regular TCP sockets. 17 | pub struct TcpConnector(()); 18 | 19 | impl Connector for TcpConnector { 20 | type Out = Either; 21 | 22 | fn connect( 23 | &self, 24 | details: &ConnectionDetails, 25 | chained: Option, 26 | ) -> Result, Error> { 27 | if chained.is_some() { 28 | // The chained connection overrides whatever we were to open here. 29 | // In the DefaultConnector chain this would be a SOCKS proxy connection. 
30 | trace!("Skip"); 31 | return Ok(chained.map(Either::A)); 32 | } 33 | 34 | let config = &details.config; 35 | let stream = try_connect(&details.addrs, details.timeout, config)?; 36 | 37 | let buffers = LazyBuffers::new(config.input_buffer_size(), config.output_buffer_size()); 38 | let transport = TcpTransport::new(stream, buffers); 39 | 40 | Ok(Some(Either::B(transport))) 41 | } 42 | } 43 | 44 | fn try_connect( 45 | addrs: &ResolvedSocketAddrs, 46 | timeout: NextTimeout, 47 | config: &Config, 48 | ) -> Result { 49 | for addr in addrs { 50 | match try_connect_single(*addr, timeout, config) { 51 | // First that connects 52 | Ok(v) => return Ok(v), 53 | // Intercept ConnectionRefused to try next addrs 54 | Err(Error::Io(e)) if e.kind() == io::ErrorKind::ConnectionRefused => { 55 | trace!("{} connection refused", addr); 56 | continue; 57 | } 58 | // Other errors bail 59 | Err(e) => return Err(e), 60 | } 61 | } 62 | 63 | debug!("Failed to connect to any resolved address"); 64 | Err(Error::Io(io::Error::new( 65 | io::ErrorKind::ConnectionRefused, 66 | "Connection refused", 67 | ))) 68 | } 69 | 70 | fn try_connect_single( 71 | addr: SocketAddr, 72 | timeout: NextTimeout, 73 | config: &Config, 74 | ) -> Result { 75 | trace!("Try connect TcpStream to {}", addr); 76 | 77 | let maybe_stream = if let Some(when) = timeout.not_zero() { 78 | TcpStream::connect_timeout(&addr, *when) 79 | } else { 80 | TcpStream::connect(addr) 81 | } 82 | .normalize_would_block(); 83 | 84 | let stream = match maybe_stream { 85 | Ok(v) => v, 86 | Err(e) if e.kind() == io::ErrorKind::TimedOut => { 87 | return Err(Error::Timeout(timeout.reason)) 88 | } 89 | Err(e) => return Err(e.into()), 90 | }; 91 | 92 | if config.no_delay() { 93 | stream.set_nodelay(true)?; 94 | } 95 | 96 | debug!("Connected TcpStream to {}", addr); 97 | 98 | Ok(stream) 99 | } 100 | 101 | pub struct TcpTransport { 102 | stream: TcpStream, 103 | buffers: LazyBuffers, 104 | timeout_write: Option, 105 | timeout_read: Option, 106 | 
}

impl TcpTransport {
    pub fn new(stream: TcpStream, buffers: LazyBuffers) -> TcpTransport {
        TcpTransport {
            stream,
            buffers,
            timeout_read: None,
            timeout_write: None,
        }
    }
}

// The goal here is to only cause a syscall to set the timeout if it's necessary.
fn maybe_update_timeout(
    timeout: NextTimeout,
    previous: &mut Option<Duration>,
    stream: &TcpStream,
    f: impl Fn(&TcpStream, Option<time::Duration>) -> io::Result<()>,
) -> io::Result<()> {
    let maybe_timeout = timeout.not_zero();

    if maybe_timeout != *previous {
        (f)(stream, maybe_timeout.map(|t| *t))?;
        *previous = maybe_timeout;
    }

    Ok(())
}

impl Transport for TcpTransport {
    fn buffers(&mut self) -> &mut dyn Buffers {
        &mut self.buffers
    }

    fn transmit_output(&mut self, amount: usize, timeout: NextTimeout) -> Result<(), Error> {
        maybe_update_timeout(
            timeout,
            &mut self.timeout_write,
            &self.stream,
            TcpStream::set_write_timeout,
        )?;

        let output = &self.buffers.output()[..amount];
        match self.stream.write_all(output).normalize_would_block() {
            Ok(v) => Ok(v),
            Err(e) if e.kind() == io::ErrorKind::TimedOut => Err(Error::Timeout(timeout.reason)),
            Err(e) => Err(e.into()),
        }?;

        Ok(())
    }

    fn await_input(&mut self, timeout: NextTimeout) -> Result<bool, Error> {
        // Proceed to fill the buffers from the TcpStream
        maybe_update_timeout(
            timeout,
            &mut self.timeout_read,
            &self.stream,
            TcpStream::set_read_timeout,
        )?;

        let input = self.buffers.input_append_buf();
        let amount = match self.stream.read(input).normalize_would_block() {
            Ok(v) => Ok(v),
            Err(e) if e.kind() == io::ErrorKind::TimedOut => Err(Error::Timeout(timeout.reason)),
            Err(e) => Err(e.into()),
        }?;
        self.buffers.input_appended(amount);

        Ok(amount > 0)
    }

    fn is_open(&mut self) -> bool {
        probe_tcp_stream(&mut self.stream).unwrap_or(false)
    }
}

// Check liveness of a pooled connection by doing a non-blocking zero-wait read.
fn probe_tcp_stream(stream: &mut TcpStream) -> Result<bool, Error> {
    // Temporary do non-blocking IO
    stream.set_nonblocking(true)?;

    let mut buf = [0];
    match stream.read(&mut buf) {
        Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
            // This is the correct condition. There should be no waiting
            // bytes, and therefore reading would block
        }
        // Any bytes read means the server sent some garbage we didn't ask for
        Ok(_) => {
            debug!("Unexpected bytes from server. Closing connection");
            return Ok(false);
        }
        // Errors such as closed connection
        Err(_) => return Ok(false),
    };

    // Reset back to blocking
    stream.set_nonblocking(false)?;

    Ok(true)
}

impl fmt::Debug for TcpConnector {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TcpConnector").finish()
    }
}

impl fmt::Debug for TcpTransport {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TcpTransport")
            .field("addr", &self.stream.peer_addr().ok())
            .finish()
    }
}

// ---- file: src/unversioned/transport/time.rs ----

//! Internal time wrappers

use std::cmp::Ordering;
use std::ops::{Add, Deref};
use std::time;

/// Wrapper for [`std::time::Instant`] that provides additional time points in the past or future
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Instant {
    /// A time in the past that already happened.
    AlreadyHappened,
    /// An exact instant.
13 | Exact(time::Instant), 14 | /// A time in the future that will never happen. 15 | NotHappening, 16 | } 17 | 18 | /// Wrapper for [`std::time::Duration`] that provides a duration to a distant future 19 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 20 | pub enum Duration { 21 | /// An exact duration. 22 | Exact(time::Duration), 23 | /// A duration so long it will never happen. 24 | NotHappening, 25 | } 26 | 27 | impl Duration { 28 | const ZERO: Duration = Duration::Exact(time::Duration::ZERO); 29 | 30 | /// Creates a duration from seconds. 31 | pub fn from_secs(secs: u64) -> Duration { 32 | Duration::Exact(time::Duration::from_secs(secs)) 33 | } 34 | 35 | /// Tells if this duration will ever happen. 36 | pub fn is_not_happening(&self) -> bool { 37 | *self == Duration::NotHappening 38 | } 39 | } 40 | 41 | const NOT_HAPPENING: time::Duration = time::Duration::from_secs(u64::MAX); 42 | 43 | impl Deref for Duration { 44 | type Target = time::Duration; 45 | 46 | fn deref(&self) -> &Self::Target { 47 | match self { 48 | Duration::Exact(v) => v, 49 | Duration::NotHappening => &NOT_HAPPENING, 50 | } 51 | } 52 | } 53 | 54 | impl Instant { 55 | /// Current time. 
56 | pub fn now() -> Self { 57 | Instant::Exact(time::Instant::now()) 58 | } 59 | 60 | pub(crate) fn duration_since(&self, earlier: Instant) -> Duration { 61 | match (self, earlier) { 62 | (Instant::AlreadyHappened, Instant::AlreadyHappened) => Duration::ZERO, 63 | (Instant::AlreadyHappened, Instant::Exact(_)) => Duration::ZERO, 64 | (Instant::AlreadyHappened, Instant::NotHappening) => Duration::ZERO, 65 | (Instant::Exact(_), Instant::NotHappening) => Duration::ZERO, 66 | (Instant::Exact(v1), Instant::Exact(v2)) => { 67 | Duration::Exact(v1.saturating_duration_since(v2)) 68 | } 69 | (Instant::Exact(_), Instant::AlreadyHappened) => Duration::NotHappening, 70 | (Instant::NotHappening, Instant::AlreadyHappened) => Duration::NotHappening, 71 | (Instant::NotHappening, Instant::Exact(_)) => Duration::NotHappening, 72 | (Instant::NotHappening, Instant::NotHappening) => Duration::NotHappening, 73 | } 74 | } 75 | } 76 | 77 | impl Add for Instant { 78 | type Output = Instant; 79 | 80 | fn add(self, rhs: Duration) -> Self::Output { 81 | match (self, rhs) { 82 | (Instant::AlreadyHappened, Duration::Exact(_)) => Instant::AlreadyHappened, 83 | (Instant::AlreadyHappened, Duration::NotHappening) => Instant::AlreadyHappened, 84 | (Instant::Exact(v1), Duration::Exact(v2)) => Instant::Exact(v1.add(v2)), 85 | (Instant::Exact(_), Duration::NotHappening) => Instant::NotHappening, 86 | (Instant::NotHappening, Duration::Exact(_)) => Instant::NotHappening, 87 | (Instant::NotHappening, Duration::NotHappening) => Instant::NotHappening, 88 | } 89 | } 90 | } 91 | 92 | impl PartialOrd for Instant { 93 | fn partial_cmp(&self, other: &Self) -> Option { 94 | Some(Self::cmp(self, other)) 95 | } 96 | } 97 | 98 | impl Ord for Instant { 99 | fn cmp(&self, other: &Self) -> Ordering { 100 | match (self, other) { 101 | (Instant::AlreadyHappened, Instant::AlreadyHappened) => Ordering::Equal, 102 | (Instant::AlreadyHappened, Instant::Exact(_)) => Ordering::Less, 103 | (Instant::AlreadyHappened, 
Instant::NotHappening) => Ordering::Less, 104 | (Instant::Exact(_), Instant::AlreadyHappened) => Ordering::Greater, 105 | (Instant::Exact(v1), Instant::Exact(v2)) => v1.cmp(v2), 106 | (Instant::Exact(_), Instant::NotHappening) => Ordering::Less, 107 | (Instant::NotHappening, Instant::AlreadyHappened) => Ordering::Greater, 108 | (Instant::NotHappening, Instant::Exact(_)) => Ordering::Greater, 109 | (Instant::NotHappening, Instant::NotHappening) => Ordering::Equal, 110 | } 111 | } 112 | } 113 | 114 | impl PartialOrd for Duration { 115 | fn partial_cmp(&self, other: &Self) -> Option { 116 | Some(Self::cmp(self, other)) 117 | } 118 | } 119 | 120 | impl Ord for Duration { 121 | fn cmp(&self, other: &Self) -> Ordering { 122 | match (self, other) { 123 | (Duration::Exact(v1), Duration::Exact(v2)) => v1.cmp(v2), 124 | (Duration::Exact(_), Duration::NotHappening) => Ordering::Less, 125 | (Duration::NotHappening, Duration::Exact(_)) => Ordering::Greater, 126 | (Duration::NotHappening, Duration::NotHappening) => Ordering::Equal, 127 | } 128 | } 129 | } 130 | 131 | impl From for Duration { 132 | fn from(value: std::time::Duration) -> Self { 133 | Self::Exact(value) 134 | } 135 | } 136 | 137 | #[cfg(test)] 138 | mod test { 139 | use super::*; 140 | 141 | #[test] 142 | fn time_ord() { 143 | assert!(Instant::AlreadyHappened < Instant::now()); 144 | assert!(Instant::now() < Instant::NotHappening); 145 | assert!(Instant::AlreadyHappened < Instant::NotHappening); 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::io::{self, ErrorKind}; 3 | use std::{fmt, iter}; 4 | 5 | use http::header::{ACCEPT, ACCEPT_CHARSET, ACCEPT_ENCODING}; 6 | use http::header::{CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TYPE}; 7 | use http::header::{DATE, HOST, LOCATION, SERVER, TRANSFER_ENCODING, USER_AGENT}; 
8 | use http::uri::{Authority, Scheme}; 9 | use http::{HeaderMap, HeaderName, HeaderValue, Method, Response, Uri, Version}; 10 | 11 | use crate::http; 12 | use crate::proxy::Proto; 13 | use crate::Error; 14 | 15 | pub(crate) mod private { 16 | pub trait Private {} 17 | } 18 | 19 | pub(crate) trait AuthorityExt { 20 | fn userinfo(&self) -> Option<&str>; 21 | fn username(&self) -> Option<&str>; 22 | fn password(&self) -> Option<&str>; 23 | } 24 | 25 | // NB: Treating &str with direct indexes is OK, since Uri parsed the Authority, 26 | // and ensured it's all ASCII (or %-encoded). 27 | impl AuthorityExt for Authority { 28 | fn userinfo(&self) -> Option<&str> { 29 | let s = self.as_str(); 30 | s.rfind('@').map(|i| &s[..i]) 31 | } 32 | 33 | fn username(&self) -> Option<&str> { 34 | self.userinfo() 35 | .map(|a| a.rfind(':').map(|i| &a[..i]).unwrap_or(a)) 36 | } 37 | 38 | fn password(&self) -> Option<&str> { 39 | self.userinfo() 40 | .and_then(|a| a.rfind(':').map(|i| &a[i + 1..])) 41 | } 42 | } 43 | 44 | pub(crate) trait SchemeExt { 45 | fn default_port(&self) -> Option; 46 | } 47 | 48 | impl SchemeExt for Scheme { 49 | fn default_port(&self) -> Option { 50 | if *self == Scheme::HTTPS { 51 | Some(443) 52 | } else if *self == Scheme::HTTP { 53 | Some(80) 54 | } else if let Ok(proxy) = Proto::try_from(self.as_str()) { 55 | Some(proxy.default_port()) 56 | } else { 57 | debug!("Unknown scheme: {}", self); 58 | None 59 | } 60 | } 61 | } 62 | 63 | /// Windows causes kind `TimedOut` while unix does `WouldBlock`. Since we are not 64 | /// using non-blocking streams, we normalize `WouldBlock` -> `TimedOut`. 
65 | pub(crate) trait IoResultExt { 66 | fn normalize_would_block(self) -> Self; 67 | } 68 | 69 | impl IoResultExt for io::Result { 70 | fn normalize_would_block(self) -> Self { 71 | match self { 72 | Ok(v) => Ok(v), 73 | Err(e) if e.kind() == ErrorKind::WouldBlock => { 74 | Err(io::Error::new(ErrorKind::TimedOut, e)) 75 | } 76 | Err(e) => Err(e), 77 | } 78 | } 79 | } 80 | 81 | #[derive(Debug)] 82 | pub(crate) struct ConsumeBuf { 83 | buf: Vec, 84 | filled: usize, 85 | consumed: usize, 86 | } 87 | 88 | impl ConsumeBuf { 89 | pub fn new(size: usize) -> Self { 90 | ConsumeBuf { 91 | buf: vec![0; size], 92 | filled: 0, 93 | consumed: 0, 94 | } 95 | } 96 | 97 | pub fn resize(&mut self, size: usize) { 98 | if size > 100 * 1024 * 1024 { 99 | panic!("ConsumeBuf grown to unreasonable size (>100MB)"); 100 | } 101 | self.buf.resize(size, 0); 102 | } 103 | 104 | pub fn add_space(&mut self, size: usize) { 105 | if size == 0 { 106 | return; 107 | } 108 | let wanted = self.buf.len() + size; 109 | self.resize(wanted); 110 | } 111 | 112 | pub fn free_mut(&mut self) -> &mut [u8] { 113 | self.maybe_shift(); 114 | &mut self.buf[self.filled..] 
115 | } 116 | 117 | pub fn add_filled(&mut self, amount: usize) { 118 | self.filled += amount; 119 | assert!(self.filled <= self.buf.len()); 120 | } 121 | 122 | pub fn unconsumed(&self) -> &[u8] { 123 | &self.buf[self.consumed..self.filled] 124 | } 125 | 126 | pub fn unconsumed_mut(&mut self) -> &mut [u8] { 127 | &mut self.buf[self.consumed..self.filled] 128 | } 129 | 130 | pub fn consume(&mut self, amount: usize) { 131 | self.consumed += amount; 132 | assert!(self.consumed <= self.filled); 133 | } 134 | 135 | fn maybe_shift(&mut self) { 136 | if self.consumed == 0 { 137 | return; 138 | } 139 | 140 | if self.consumed == self.filled { 141 | self.consumed = 0; 142 | self.filled = 0; 143 | } else if self.filled > self.buf.len() / 2 { 144 | self.buf.copy_within(self.consumed..self.filled, 0); 145 | self.filled -= self.consumed; 146 | self.consumed = 0; 147 | } 148 | } 149 | } 150 | 151 | /// Wrapper to only log non-sensitive data. 152 | pub(crate) struct DebugRequest<'a> { 153 | pub method: &'a Method, 154 | pub uri: &'a Uri, 155 | pub version: Version, 156 | pub headers: HeaderMap, 157 | } 158 | 159 | impl<'a> fmt::Debug for DebugRequest<'a> { 160 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 161 | f.debug_struct("Request") 162 | .field("method", &self.method) 163 | .field("uri", &DebugUri(self.uri)) 164 | .field("version", &self.version) 165 | .field("headers", &DebugHeaders(&self.headers)) 166 | .finish() 167 | } 168 | } 169 | 170 | /// Wrapper to only log non-sensitive data. 
171 | pub(crate) struct DebugResponse<'a, B>(pub &'a Response); 172 | 173 | impl<'a, B> fmt::Debug for DebugResponse<'a, B> { 174 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 175 | f.debug_struct("Response") 176 | .field("status", &self.0.status()) 177 | .field("version", &self.0.version()) 178 | .field("headers", &DebugHeaders(self.0.headers())) 179 | .finish() 180 | } 181 | } 182 | 183 | pub(crate) struct DebugHeaders<'a>(pub &'a HeaderMap); 184 | 185 | const NON_SENSITIVE_HEADERS: &[HeaderName] = &[ 186 | DATE, 187 | CONTENT_TYPE, 188 | CONTENT_LENGTH, 189 | TRANSFER_ENCODING, 190 | CONNECTION, 191 | CONTENT_ENCODING, 192 | HOST, 193 | ACCEPT, 194 | ACCEPT_ENCODING, 195 | ACCEPT_CHARSET, 196 | SERVER, 197 | USER_AGENT, 198 | // LOCATION is also logged in a redacted form 199 | ]; 200 | 201 | impl<'a> fmt::Debug for DebugHeaders<'a> { 202 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 203 | let mut debug = f.debug_map(); 204 | 205 | static REDACTED_LOCATION: HeaderValue = HeaderValue::from_static("******"); 206 | let has_location = self.0.has_location(); 207 | 208 | let filtered_headers = self 209 | .0 210 | .iter() 211 | .filter(|(name, _)| NON_SENSITIVE_HEADERS.contains(name)); 212 | 213 | if has_location { 214 | let location_header = if log_enabled!(log::Level::Trace) { 215 | iter::once((&LOCATION, self.0.get(LOCATION).unwrap())) 216 | } else { 217 | iter::once((&LOCATION, &REDACTED_LOCATION)) 218 | }; 219 | debug.entries(filtered_headers.chain(location_header)); 220 | } else { 221 | debug.entries(filtered_headers); 222 | } 223 | 224 | let redact_count = self 225 | .0 226 | .iter() 227 | .filter(|(name, _)| { 228 | // println!("{}", name); 229 | !NON_SENSITIVE_HEADERS.contains(name) 230 | }) 231 | .count() 232 | // location is logged, but redacted, so do not include in the count 233 | - if has_location { 1 } else { 0 }; 234 | 235 | if redact_count > 0 { 236 | debug.entry( 237 | &"", 238 | &format!("{} HEADERS ARE REDACTED", 
redact_count), 239 | ); 240 | } 241 | 242 | debug.finish() 243 | } 244 | } 245 | 246 | /// Wrapper to only log non-sensitive data. 247 | pub(crate) struct DebugUri<'a>(pub &'a Uri); 248 | 249 | impl<'a> fmt::Debug for DebugUri<'a> { 250 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 251 | if let Some(s) = self.0.scheme_str() { 252 | write!(f, "{}://", s)?; 253 | } 254 | 255 | if let Some(a) = self.0.authority() { 256 | write!(f, "{:?}", DebugAuthority(a))?; 257 | } 258 | 259 | if let Some(q) = self.0.path_and_query() { 260 | if log_enabled!(log::Level::Trace) { 261 | write!(f, "{}", q)?; 262 | } else { 263 | write!(f, "/******")?; 264 | } 265 | } 266 | 267 | Ok(()) 268 | } 269 | } 270 | 271 | pub(crate) struct DebugAuthority<'a>(pub &'a Authority); 272 | 273 | impl<'a> fmt::Debug for DebugAuthority<'a> { 274 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 275 | let mut at = false; 276 | 277 | if let Some(u) = self.0.username() { 278 | at = true; 279 | if let Some(x) = u.chars().next() { 280 | write!(f, "{}*****", x)?; 281 | } 282 | } 283 | 284 | if self.0.password().is_some() { 285 | at = true; 286 | write!(f, ":******")?; 287 | } 288 | 289 | if at { 290 | write!(f, "@")?; 291 | } 292 | 293 | write!(f, "{}", self.0.host())?; 294 | 295 | if let Some(p) = self.0.port_u16() { 296 | write!(f, ":{}", p)?; 297 | } 298 | 299 | Ok(()) 300 | } 301 | } 302 | 303 | pub(crate) trait UriExt { 304 | fn ensure_valid_url(&self) -> Result<(), Error>; 305 | 306 | #[cfg(feature = "_url")] 307 | fn try_into_url(&self) -> Result; 308 | } 309 | 310 | impl UriExt for Uri { 311 | fn ensure_valid_url(&self) -> Result<(), Error> { 312 | let scheme = self 313 | .scheme() 314 | .ok_or_else(|| Error::BadUri(format!("{} is missing scheme", self)))?; 315 | 316 | scheme 317 | .default_port() 318 | .ok_or_else(|| Error::BadUri(format!("unknown scheme: {}", scheme)))?; 319 | 320 | self.authority() 321 | .ok_or_else(|| Error::BadUri(format!("{} is missing host", self)))?; 
322 | 323 | Ok(()) 324 | } 325 | 326 | #[cfg(feature = "_url")] 327 | fn try_into_url(&self) -> Result { 328 | self.ensure_valid_url()?; 329 | let uri = self.to_string(); 330 | 331 | // If ensure_full_url() works, we expect to be able to parse it to a url 332 | let url = url::Url::parse(&uri).expect("parsed url"); 333 | 334 | Ok(url) 335 | } 336 | } 337 | 338 | pub(crate) trait HeaderMapExt { 339 | fn get_str(&self, k: HeaderName) -> Option<&str>; 340 | fn is_chunked(&self) -> bool; 341 | fn content_length(&self) -> Option; 342 | fn has_accept_encoding(&self) -> bool; 343 | fn has_user_agent(&self) -> bool; 344 | fn has_send_body_mode(&self) -> bool { 345 | self.is_chunked() || self.content_length().is_some() 346 | } 347 | fn has_accept(&self) -> bool; 348 | fn has_content_type(&self) -> bool; 349 | fn has_location(&self) -> bool; 350 | } 351 | 352 | impl HeaderMapExt for HeaderMap { 353 | fn get_str(&self, k: HeaderName) -> Option<&str> { 354 | self.get(k).and_then(|v| v.to_str().ok()) 355 | } 356 | 357 | fn is_chunked(&self) -> bool { 358 | self.get_str(TRANSFER_ENCODING) 359 | .map(|v| v.contains("chunked")) 360 | .unwrap_or(false) 361 | } 362 | 363 | fn content_length(&self) -> Option { 364 | let h = self.get_str(CONTENT_LENGTH)?; 365 | let len: u64 = h.parse().ok()?; 366 | Some(len) 367 | } 368 | 369 | fn has_accept_encoding(&self) -> bool { 370 | self.contains_key(ACCEPT_ENCODING) 371 | } 372 | 373 | fn has_user_agent(&self) -> bool { 374 | self.contains_key(USER_AGENT) 375 | } 376 | 377 | fn has_accept(&self) -> bool { 378 | self.contains_key(ACCEPT) 379 | } 380 | 381 | fn has_content_type(&self) -> bool { 382 | self.contains_key(CONTENT_TYPE) 383 | } 384 | 385 | fn has_location(&self) -> bool { 386 | self.contains_key(LOCATION) 387 | } 388 | } 389 | --------------------------------------------------------------------------------