├── .formatter.exs
├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE.md
├── README.md
├── lib
├── req.ex
└── req
│ ├── application.ex
│ ├── archive_error.ex
│ ├── checksum_mismatch_error.ex
│ ├── decompress_error.ex
│ ├── fields.ex
│ ├── finch.ex
│ ├── http_error.ex
│ ├── request.ex
│ ├── response.ex
│ ├── response_async.ex
│ ├── steps.ex
│ ├── test.ex
│ ├── test
│ ├── ownership.ex
│ └── ownership_error.ex
│ ├── too_many_redirects_error.ex
│ ├── transport_error.ex
│ └── utils.ex
├── mix.exs
├── mix.lock
└── test
├── my_netrc
├── req
├── default_options_test.exs
├── fields_test.exs
├── finch_test.exs
├── httpc_test.exs
├── integration_test.exs
├── request_test.exs
├── response_test.exs
├── steps_test.exs
├── test_test.exs
└── utils_test.exs
├── req_test.exs
└── test_helper.exs
/.formatter.exs:
--------------------------------------------------------------------------------
1 | # Used by "mix format"
2 | [
3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"]
4 | ]
5 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | pull_request:
4 | push:
5 | branches:
6 | - main
7 | jobs:
8 | test:
9 | runs-on: ubuntu-24.04
10 | env:
11 | MIX_ENV: test
12 | # TODO: Remove on Req 1.0
13 | REQ_NOWARN_OUTPUT: true
14 | strategy:
15 | fail-fast: false
16 | matrix:
17 | include:
18 | - pair:
19 | elixir: "1.14"
20 | otp: "24.3.4.10"
21 | - pair:
22 | elixir: "1.18.4-otp-27"
23 | otp: "28.0"
24 | lint: lint
25 | steps:
26 | - uses: actions/checkout@v4
27 |
28 | - uses: erlef/setup-beam@main
29 | with:
30 | otp-version: ${{ matrix.pair.otp }}
31 | elixir-version: ${{ matrix.pair.elixir }}
32 | version-type: strict
33 |
34 | - uses: actions/cache@v4
35 | with:
36 | path: deps
37 | # ezstd and brotli are compiling native objects to deps/x/_build, not _build,
38 | # so we need to cache per OTP
39 | key: mix-otp-${{ matrix.pair.otp }}-deps-${{ hashFiles('**/mix.lock') }}
40 |
41 | - run: mix deps.get --check-locked
42 |
43 | - run: mix format --check-formatted
44 | if: ${{ matrix.lint }}
45 |
46 | - run: mix deps.unlock --check-unused
47 | if: ${{ matrix.lint }}
48 |
49 | - run: mix deps.compile
50 |
51 | - run: mix compile --no-optional-deps --warnings-as-errors
52 | if: ${{ matrix.lint }}
53 |
54 | - run: mix test --slowest 5
55 | if: ${{ ! matrix.lint }}
56 |
57 | - run: mix test --slowest 5 --warnings-as-errors
58 | if: ${{ matrix.lint }}
59 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # The directory Mix will write compiled artifacts to.
2 | /_build/
3 |
4 | # If you run "mix test --cover", coverage assets end up here.
5 | /cover/
6 |
7 | # The directory Mix downloads your dependencies sources to.
8 | /deps/
9 |
10 | # Where third-party dependencies like ExDoc output generated docs.
11 | /doc/
12 |
13 | # Ignore .fetch files in case you like to edit your project deps locally.
14 | /.fetch
15 |
16 | # If the VM crashes, it generates a dump, let's ignore it too.
17 | erl_crash.dump
18 |
19 | # Also ignore archive artifacts (built via "mix archive.build").
20 | *.ez
21 |
22 | # Ignore package tarball (built via "mix hex.build").
23 | req-*.tar
24 |
25 | # Temporary files, for example, from tests.
26 | /tmp/
27 | /push.sh
28 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2021 Wojtek Mach
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Req
2 |
3 | [](https://github.com/wojtekmach/req/actions/workflows/ci.yml)
4 | [](https://github.com/wojtekmach/req/blob/main/LICENSE.md)
5 | [](https://hex.pm/packages/req)
6 | [](https://hexdocs.pm/req)
7 |
8 | Req is a batteries-included HTTP client for Elixir.
9 |
10 | With just a couple lines of code:
11 |
12 | ```elixir
13 | Mix.install([
14 | {:req, "~> 0.5.0"}
15 | ])
16 |
17 | Req.get!("https://api.github.com/repos/wojtekmach/req").body["description"]
18 | #=> "Req is a batteries-included HTTP client for Elixir."
19 | ```
20 |
21 | we get automatic response body decompression & decoding, following redirects, retrying on errors,
22 | and much more. Virtually all of the features are broken down into individual functions called
23 | _steps_. You can easily re-use and re-arrange built-in steps (see [`Req.Steps`] module) and
24 | write new ones.
25 |
26 | ## Features
27 |
28 | * An easy to use high-level API: [`Req.request/1`], [`Req.new/1`], [`Req.get!/2`], [`Req.post!/2`], etc.
29 |
30 | * Extensibility via request, response, and error steps.
31 |
32 | * Request body compression (via [`compress_body`] step)
33 |
34 | * Automatic response body decompression (via [`compressed`] and [`decompress_body`] steps). Supports gzip, brotli, and zstd.
35 |
36 | * Request body encoding. Supports urlencoded and multipart forms, and JSON. See [`encode_body`].
37 |
38 | * Automatic response body decoding (via [`decode_body`] step.)
39 |
40 | * Encode params as query string (via [`put_params`] step.)
41 |
42 | * Setting base URL (via [`put_base_url`] step.)
43 |
44 | * Templated request paths (via [`put_path_params`] step.)
45 |
46 | * Basic, bearer, and `.netrc` authentication (via [`auth`] step.)
47 |
48 |   * Range requests (via [`put_range`] step.)
49 |
50 |   * Use AWS V4 Signature (via [`put_aws_sigv4`] step.)
51 |
52 | * Request body streaming (by setting `body: enumerable`.)
53 |
54 | * Response body streaming (by setting `into: fun | collectable | :self`.)
55 |
56 | * Follows redirects (via [`redirect`] step.)
57 |
58 | * Retries on errors (via [`retry`] step.)
59 |
60 | * Raise on 4xx/5xx errors (via [`handle_http_errors`] step.)
61 |
62 | * Verify response body against a checksum (via [`checksum`] step.)
63 |
64 | * Basic HTTP caching (via [`cache`] step.)
65 |
66 | * Easily create test stubs (see [`Req.Test`].)
67 |
68 | * Running against a plug (via [`run_plug`] step.)
69 |
70 | * Pluggable adapters. By default, Req uses [Finch] (via [`run_finch`] step.)
71 |
72 | ## Usage
73 |
74 | The easiest way to use Req is with [`Mix.install/2`] (requires Elixir v1.12+):
75 |
76 | ```elixir
77 | Mix.install([
78 | {:req, "~> 0.5.0"}
79 | ])
80 |
81 | Req.get!("https://api.github.com/repos/wojtekmach/req").body["description"]
82 | #=> "Req is a batteries-included HTTP client for Elixir."
83 | ```
84 |
85 | If you want to use Req in a Mix project, you can add the above dependency to your `mix.exs`.
86 |
87 | Here's an example POST with JSON data:
88 |
89 | ```elixir
90 | iex> Req.post!("https://httpbin.org/post", json: %{x: 1, y: 2}).body["json"]
91 | %{"x" => 1, "y" => 2}
92 | ```
93 |
94 | You can stream request body:
95 |
96 | ```elixir
97 | iex> stream = Stream.duplicate("foo", 3)
98 | iex> Req.post!("https://httpbin.org/post", body: stream).body["data"]
99 | "foofoofoo"
100 | ```
101 |
102 | and stream the response body:
103 |
104 | ```elixir
105 | iex> resp = Req.get!("http://httpbin.org/stream/2", into: IO.stream())
106 | # output: {"url": "http://httpbin.org/stream/2", ...}
107 | # output: {"url": "http://httpbin.org/stream/2", ...}
108 | iex> resp.status
109 | 200
110 | iex> resp.body
111 | %IO.Stream{}
112 | ```
113 |
114 | (See [`Req`] module documentation for more examples of response body streaming.)
115 |
116 | If you are planning to make several similar requests, you can build up a request struct with
117 | desired common options and re-use it:
118 |
119 | ```elixir
120 | req = Req.new(base_url: "https://api.github.com")
121 |
122 | Req.get!(req, url: "/repos/sneako/finch").body["description"]
123 | #=> "Elixir HTTP client, focused on performance"
124 |
125 | Req.get!(req, url: "/repos/elixir-mint/mint").body["description"]
126 | #=> "Functional HTTP client for Elixir with support for HTTP/1 and HTTP/2."
127 | ```
128 |
129 | See [`Req.new/1`] for more information on available options.
130 |
131 | Virtually all of Req's features are broken down into individual pieces - steps. Req works by running
132 | the request struct through these steps. You can easily reuse or rearrange built-in steps or write new
133 | ones. Importantly, steps are just regular functions. Here is another example where we append a request
134 | step that inspects the URL just before requesting it:
135 |
136 | ```elixir
137 | req =
138 | Req.new(base_url: "https://api.github.com")
139 | |> Req.Request.append_request_steps(
140 | debug_url: fn request ->
141 | IO.inspect(URI.to_string(request.url))
142 | request
143 | end
144 | )
145 |
146 | Req.get!(req, url: "/repos/wojtekmach/req").body["description"]
147 | # output: "https://api.github.com/repos/wojtekmach/req"
148 | #=> "Req is a batteries-included HTTP client for Elixir."
149 | ```
150 |
151 | Custom steps can be packaged into plugins so that they are even easier to use by others. See [Related Packages](#related-packages).
152 |
153 | Here is how they can be used:
154 |
155 | ```elixir
156 | Mix.install([
157 | {:req, "~> 0.5.0"},
158 | {:req_easyhtml, "~> 0.1.0"},
159 | {:req_s3, "~> 0.2.3"},
160 | {:req_hex, "~> 0.2.0"},
161 | {:req_github_oauth, "~> 0.1.0"}
162 | ])
163 |
164 | req =
165 | (Req.new(http_errors: :raise)
166 | |> ReqEasyHTML.attach()
167 | |> ReqS3.attach()
168 | |> ReqHex.attach()
169 | |> ReqGitHubOAuth.attach())
170 |
171 | Req.get!(req, url: "https://elixir-lang.org").body[".entry-summary h5"]
172 | #=>
173 | # #EasyHTML[
174 | # Elixir is a dynamic, functional language for building scalable and maintainable applications.
175 | # ]
176 |
177 | Req.get!(req, url: "s3://ossci-datasets/mnist/t10k-images-idx3-ubyte.gz").body
178 | #=> <<0, 0, 8, 3, ...>>
179 |
180 | Req.get!(req, url: "https://repo.hex.pm/tarballs/req-0.1.0.tar").body["metadata.config"]["links"]
181 | #=> %{"GitHub" => "https://github.com/wojtekmach/req"}
182 |
183 | Req.get!(req, url: "https://api.github.com/user").body["login"]
184 | # output:
185 | # paste this user code:
186 | #
187 | # 6C44-30A8
188 | #
189 | # at:
190 | #
191 | # https://github.com/login/device
192 | #
193 | # open browser window? [Yn]
194 | # 15:22:28.350 [info] response: authorization_pending
195 | # 15:22:33.519 [info] response: authorization_pending
196 | # 15:22:38.678 [info] response: authorization_pending
197 | #=> "wojtekmach"
198 |
199 | Req.get!(req, url: "https://api.github.com/user").body["login"]
200 | #=> "wojtekmach"
201 | ```
202 |
203 | See [`Req.Request`] module documentation for more information on low-level API, request struct, and developing plugins.
204 |
205 | ## Configuration
206 |
207 | Req supports many configuration options, see [`Req.new/1`] for a full list and see each step for
208 | more details. In particular, if you are looking for slightly lower level HTTP options such as
209 | timeouts, pool sizes, and certificates, see the [`run_finch`] documentation.
210 |
211 | ## Related Packages
212 |
213 | There are many packages that extend the Req library. To get yours listed here, send a PR.
214 |
215 | * [`req_easyhtml`]
216 | * [`req_s3`]
217 | * [`req_hex`]
218 | * [`req_github_oauth`]
219 | * [`curl_req`]
220 | * [`http_cookie`]
221 | * [`req_embed`]
222 |
223 | ## Presentations
224 |
225 | * [Req: A batteries-included HTTP client for Elixir - ElixirConf 2023, 2023-09-08](https://www.youtube.com/watch?v=owz2QacFuoQ "ElixirConf 2023 - Wojtek Mach - Req - a batteries-included HTTP client for Elixir")
226 | * [Req: A batteries included HTTP client for Elixir - Elixir Kenya, 2022-08-26](https://www.youtube.com/watch?v=NxWgvHRN6mI "Req: A batteries included HTTP client for Elixir")
227 |
228 | ## Acknowledgments
229 |
230 | Req is built on top of [Finch] and is inspired by [cURL], [Requests], [Tesla], and many other HTTP clients - thank you!
231 |
232 | ## License
233 |
234 | Copyright (c) 2021 Wojtek Mach
235 |
236 | Licensed under the Apache License, Version 2.0 (the "License");
237 | you may not use this file except in compliance with the License.
238 | You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
239 |
240 | Unless required by applicable law or agreed to in writing, software
241 | distributed under the License is distributed on an "AS IS" BASIS,
242 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
243 | See the License for the specific language governing permissions and
244 | limitations under the License.
245 |
246 | [`Req.request/1`]: https://hexdocs.pm/req/Req.html#request/1
247 | [`Req.new/1`]: https://hexdocs.pm/req/Req.html#new/1
248 | [`Req.get!/2`]: https://hexdocs.pm/req/Req.html#get!/2
249 | [`Req.post!/2`]: https://hexdocs.pm/req/Req.html#post!/2
250 | [`Req`]: https://hexdocs.pm/req
251 | [`Req.Request`]: https://hexdocs.pm/req/Req.Request.html
252 | [`Req.Steps`]: https://hexdocs.pm/req/Req.Steps.html
253 | [`Req.Test`]: https://hexdocs.pm/req/Req.Test.html
254 |
255 | [`auth`]: https://hexdocs.pm/req/Req.Steps.html#auth/1
256 | [`cache`]: https://hexdocs.pm/req/Req.Steps.html#cache/1
257 | [`compress_body`]: https://hexdocs.pm/req/Req.Steps.html#compress_body/1
258 | [`compressed`]: https://hexdocs.pm/req/Req.Steps.html#compressed/1
259 | [`decode_body`]: https://hexdocs.pm/req/Req.Steps.html#decode_body/1
260 | [`decompress_body`]: https://hexdocs.pm/req/Req.Steps.html#decompress_body/1
261 | [`encode_body`]: https://hexdocs.pm/req/Req.Steps.html#encode_body/1
262 | [`redirect`]: https://hexdocs.pm/req/Req.Steps.html#redirect/1
263 | [`handle_http_errors`]: https://hexdocs.pm/req/Req.Steps.html#handle_http_errors/1
264 | [`output`]: https://hexdocs.pm/req/Req.Steps.html#output/1
265 | [`put_base_url`]: https://hexdocs.pm/req/Req.Steps.html#put_base_url/1
266 | [`put_params`]: https://hexdocs.pm/req/Req.Steps.html#put_params/1
267 | [`put_path_params`]: https://hexdocs.pm/req/Req.Steps.html#put_path_params/1
268 | [`run_plug`]: https://hexdocs.pm/req/Req.Steps.html#run_plug/1
269 | [`put_range`]: https://hexdocs.pm/req/Req.Steps.html#put_range/1
270 | [`put_user_agent`]: https://hexdocs.pm/req/Req.Steps.html#put_user_agent/1
271 | [`retry`]: https://hexdocs.pm/req/Req.Steps.html#retry/1
272 | [`run_finch`]: https://hexdocs.pm/req/Req.Steps.html#run_finch/1
273 | [`checksum`]: https://hexdocs.pm/req/Req.Steps.html#checksum/1
274 | [`put_aws_sigv4`]: https://hexdocs.pm/req/Req.Steps.html#put_aws_sigv4/1
275 |
276 | [Finch]: https://github.com/sneako/finch
277 | [cURL]: https://curl.se
278 | [Requests]: https://docs.python-requests.org/en/master/
279 | [Tesla]: https://github.com/elixir-tesla/tesla
280 | [`req_easyhtml`]: https://github.com/wojtekmach/req_easyhtml
281 | [`req_s3`]: https://github.com/wojtekmach/req_s3
282 | [`req_hex`]: https://github.com/wojtekmach/req_hex
283 | [`req_github_oauth`]: https://github.com/wojtekmach/req_github_oauth
284 | [`Mix.install/2`]: https://hexdocs.pm/mix/Mix.html#install/2
285 | [`curl_req`]: https://github.com/derekkraan/curl_req
286 | [`http_cookie`]: https://github.com/reisub/http_cookie
287 | [`req_embed`]: https://github.com/leandrocp/req_embed
288 |
--------------------------------------------------------------------------------
/lib/req/application.ex:
--------------------------------------------------------------------------------
1 | defmodule Req.Application do
2 | @moduledoc false
3 |
4 | use Application
5 |
6 | @impl true
7 | def start(_type, _args) do
8 | children = [
9 | {Finch,
10 | name: Req.Finch,
11 | pools: %{
12 | default: Req.Finch.pool_options(%{})
13 | }},
14 | {DynamicSupervisor, strategy: :one_for_one, name: Req.FinchSupervisor},
15 | {Req.Test.Ownership, name: Req.Test.Ownership}
16 | ]
17 |
18 | Supervisor.start_link(children, strategy: :one_for_one)
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/lib/req/archive_error.ex:
--------------------------------------------------------------------------------
1 | defmodule Req.ArchiveError do
2 | @moduledoc """
3 | Represents an error when unpacking archives fails, returned by `Req.Steps.decode_body/1`.
4 | """
5 |
6 | defexception [:format, :data, :reason]
7 |
8 | @impl true
9 | def message(%{format: :tar, reason: reason}) do
10 | "tar unpacking failed: #{:erl_tar.format_error(reason)}"
11 | end
12 |
13 | @impl true
14 | def message(%{format: format, reason: nil}) do
15 | "#{format} unpacking failed"
16 | end
17 |
18 | @impl true
19 | def message(%{format: format, reason: reason}) do
20 | "#{format} unpacking failed: #{inspect(reason)}"
21 | end
22 | end
23 |
--------------------------------------------------------------------------------
/lib/req/checksum_mismatch_error.ex:
--------------------------------------------------------------------------------
1 | defmodule Req.ChecksumMismatchError do
2 | @moduledoc """
3 | Represents a checksum mismatch error returned by `Req.Steps.checksum/1`.
4 | """
5 |
6 | defexception [:expected, :actual]
7 |
8 | @impl true
9 | def message(%{expected: expected, actual: actual}) do
10 | """
11 | checksum mismatch
12 | expected: #{expected}
13 | actual: #{actual}\
14 | """
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/lib/req/decompress_error.ex:
--------------------------------------------------------------------------------
1 | defmodule Req.DecompressError do
2 | @moduledoc """
3 | Represents an error when decompression fails, returned by `Req.Steps.decompress_body/1`.
4 | """
5 |
6 | defexception [:format, :data, :reason]
7 |
8 | @impl true
9 | def message(%{format: format, reason: nil}) do
10 | "#{format} decompression failed"
11 | end
12 |
13 | @impl true
14 | def message(%{format: format, reason: reason}) do
15 | "#{format} decompression failed, reason: #{inspect(reason)}"
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/lib/req/fields.ex:
--------------------------------------------------------------------------------
1 | defmodule Req.Fields do
2 | @moduledoc false
3 | # Conveniences for working with HTTP Fields, i.e. HTTP Headers and HTTP Trailers.
4 |
5 | @legacy? Req.MixProject.legacy_headers_as_lists?()
6 |
7 | # Legacy behaviour previously used in Req.Request.new.
8 | # I plan to use Req.Fields.new, i.e. normalize.
9 | def new_without_normalize(enumerable)
10 |
11 | if @legacy? do
12 | def new_without_normalize(enumerable) do
13 | Enum.to_list(enumerable)
14 | end
15 | else
16 | def new_without_normalize(enumerable) do
17 | Map.new(enumerable, fn {key, value} ->
18 | {key, List.wrap(value)}
19 | end)
20 | end
21 | end
22 |
23 | # Legacy behaviour previously used in Req.Response.new.
24 | # I plan to use Req.Fields.new, i.e. normalize.
25 | def new_without_normalize_with_duplicates(enumerable)
26 |
27 | if @legacy? do
28 | def new_without_normalize_with_duplicates(enumerable) do
29 | Enum.to_list(enumerable)
30 | end
31 | else
32 | def new_without_normalize_with_duplicates(enumerable) do
33 | Enum.reduce(enumerable, %{}, fn {name, value}, acc ->
34 | Map.update(acc, name, List.wrap(value), &(&1 ++ List.wrap(value)))
35 | end)
36 | end
37 | end
38 |
39 | @doc """
40 | Returns fields from a given enumerable.
41 |
42 | ## Examples
43 |
44 | iex> Req.Fields.new(a: 1, b: [1, 2])
45 | %{"a" => ["1"], "b" => ["1", "2"]}
46 |
47 | iex> Req.Fields.new(%{"a" => ["1"], "b" => ["1", "2"]})
48 | %{"a" => ["1"], "b" => ["1", "2"]}
49 | """
50 | if @legacy? do
51 | def new(enumerable) do
52 | for {name, value} <- enumerable do
53 | {normalize_name(name), normalize_value(value)}
54 | end
55 | end
56 | else
57 | def new(enumerable) do
58 | Enum.reduce(enumerable, %{}, fn {name, value}, acc ->
59 | Map.update(
60 | acc,
61 | normalize_name(name),
62 | normalize_values(List.wrap(value)),
63 | &(&1 ++ normalize_values(List.wrap(value)))
64 | )
65 | end)
66 | end
67 |
68 | defp normalize_values([value | rest]) do
69 | [normalize_value(value) | normalize_values(rest)]
70 | end
71 |
72 | defp normalize_values([]) do
73 | []
74 | end
75 | end
76 |
77 | defp normalize_name(name) when is_atom(name) do
78 | name |> Atom.to_string() |> String.replace("_", "-") |> ensure_name_downcase()
79 | end
80 |
81 | defp normalize_name(name) when is_binary(name) do
82 | ensure_name_downcase(name)
83 | end
84 |
85 | defp normalize_value(%DateTime{} = datetime) do
86 | datetime |> DateTime.shift_zone!("Etc/UTC") |> Req.Utils.format_http_date()
87 | end
88 |
89 | defp normalize_value(%NaiveDateTime{} = datetime) do
90 | IO.warn("setting field to %NaiveDateTime{} is deprecated, use %DateTime{} instead")
91 | Req.Utils.format_http_date(datetime)
92 | end
93 |
94 | defp normalize_value(value) when is_binary(value) do
95 | value
96 | end
97 |
98 | defp normalize_value(value) when is_integer(value) do
99 | Integer.to_string(value)
100 | end
101 |
102 | defp normalize_value(value) do
103 | IO.warn(
104 | "setting field to value other than string, integer, or %DateTime{} is deprecated," <>
105 | " got: #{inspect(value)}"
106 | )
107 |
108 | String.Chars.to_string(value)
109 | end
110 |
111 | @doc """
112 | Merges `fields1` and `fields2`.
113 |
114 | ## Examples
115 |
116 | iex> Req.Fields.merge(%{"a" => ["1"]}, %{"a" => ["2"], "b" => ["2"]})
117 | %{"a" => ["2"], "b" => ["2"]}
118 | """
119 | def merge(fields1, fields2)
120 |
121 | def merge(old_fields, new_fields) do
122 | if unquote(@legacy?) do
123 | new_fields = new(new_fields)
124 | new_field_names = Enum.map(new_fields, &elem(&1, 0))
125 | Enum.reject(old_fields, &(elem(&1, 0) in new_field_names)) ++ new_fields
126 | else
127 | Map.merge(old_fields, new(new_fields))
128 | end
129 | end
130 |
  @doc """
  Returns the field `name` lowercased using ASCII case rules.
  """
  def ensure_name_downcase(name) do
    String.downcase(name, :ascii)
  end
134 |
135 | @doc """
136 | Returns field values.
137 | """
138 | def get_values(fields, name)
139 |
140 | if @legacy? do
141 | def get_values(fields, name) when is_binary(name) do
142 | name = ensure_name_downcase(name)
143 |
144 | for {^name, value} <- fields do
145 | value
146 | end
147 | end
148 | else
149 | def get_values(fields, name) when is_binary(name) do
150 | name = ensure_name_downcase(name)
151 | Map.get(fields, name, [])
152 | end
153 | end
154 |
155 | @doc """
156 | Adds a new field `name` with the given `value` if not present,
157 | otherwise replaces previous value with `value`.
158 | """
159 | def put(fields, name, value)
160 |
161 | if @legacy? do
162 | def put(fields, name, value) when is_binary(name) and is_binary(value) do
163 | name = ensure_name_downcase(name)
164 | List.keystore(fields, name, 0, {name, value})
165 | end
166 | else
167 | def put(fields, name, value) when is_binary(name) and is_binary(value) do
168 | name = ensure_name_downcase(name)
169 | put_in(fields[name], List.wrap(value))
170 | end
171 | end
172 |
173 | @doc """
174 | Adds a field `name` unless already present.
175 | """
176 | def put_new(fields, name, value)
177 |
178 | if @legacy? do
179 | def put_new(fields, name, value) when is_binary(name) and is_binary(value) do
180 | case get_values(fields, name) do
181 | [] ->
182 | put(fields, name, value)
183 |
184 | _ ->
185 | fields
186 | end
187 | end
188 | else
189 | def put_new(fields, name, value) when is_binary(name) and is_binary(value) do
190 | name = ensure_name_downcase(name)
191 | Map.put_new(fields, name, List.wrap(value))
192 | end
193 | end
194 |
195 | @doc """
196 | Deletes the field given by `name`.
197 | """
198 | def delete(fields, name)
199 |
200 | if @legacy? do
201 | def delete(fields, name) when is_binary(name) do
202 | name_to_delete = ensure_name_downcase(name)
203 |
204 | for {name, value} <- fields,
205 | name != name_to_delete do
206 | {name, value}
207 | end
208 | end
209 | else
210 | def delete(fields, name) when is_binary(name) do
211 | name = ensure_name_downcase(name)
212 | Map.delete(fields, name)
213 | end
214 | end
215 |
216 | @doc """
217 | Returns fields as list.
218 | """
219 | def get_list(fields)
220 |
221 | if @legacy? do
222 | def get_list(fields) do
223 | fields
224 | end
225 | else
226 | def get_list(fields) do
227 | for {name, values} <- fields,
228 | value <- values do
229 | {name, value}
230 | end
231 | end
232 | end
233 | end
234 |
--------------------------------------------------------------------------------
/lib/req/finch.ex:
--------------------------------------------------------------------------------
defmodule Req.Finch do
  # Adapter that runs Req requests through Finch. Handles header-format
  # differences (legacy list vs map), body streaming, the various `:into`
  # response-streaming modes, and on-demand Finch pool management.
  @moduledoc false

  @doc """
  Runs the request using `Finch`.
  """
  def run(request) do
    # URI.parse removes `[` and `]` so we can't check for these. The host
    # should not have `:` so it should be safe to check for it.
    request =
      if !request.options[:inet6] and
           (request.url.host || "") =~ ":" do
        # A `:` in the host means an IPv6 literal: force inet6...
        request = put_in(request.options[:inet6], true)
        # ...and have to put them back for host header.
        Req.Request.put_new_header(request, "host", "[#{request.url.host}]")
      else
        request
      end

    finch_name = finch_name(request)

    # Legacy mode keeps headers as a flat `[{name, value}]` list, which is
    # what Finch expects; otherwise flatten the `%{name => values}` map.
    # (Flag is compile-time; `unquote` is an unquote fragment in the def body.)
    request_headers =
      if unquote(Req.MixProject.legacy_headers_as_lists?()) do
        request.headers
      else
        for {name, values} <- request.headers,
            value <- values do
          {name, value}
        end
      end

    body =
      case request.body do
        iodata when is_binary(iodata) or is_list(iodata) ->
          iodata

        nil ->
          nil

        # Any other term is assumed enumerable and streamed as the request body.
        enumerable ->
          {:stream, enumerable}
      end

    finch_request =
      Finch.build(request.method, request.url, request_headers, body)
      |> Map.replace!(:unix_socket, request.options[:unix_socket])
      |> add_private_options(request.options[:finch_private])

    finch_options =
      request.options |> Map.take([:receive_timeout, :pool_timeout]) |> Enum.to_list()

    run(request, finch_request, finch_name, finch_options)
  end

  # Dispatches on the :finch_request option, then on `req.into`, to decide
  # how the request is executed and how the response body is consumed.
  defp run(req, finch_req, finch_name, finch_options) do
    case req.options[:finch_request] do
      fun when is_function(fun, 4) ->
        fun.(req, finch_req, finch_name, finch_options)

      deprecated_fun when is_function(deprecated_fun, 1) ->
        IO.warn(
          "passing a :finch_request function accepting a single argument is deprecated. " <>
            "See Req.Steps.run_finch/1 for more information."
        )

        {req, run_finch_request(deprecated_fun.(finch_req), finch_name, finch_options)}

      nil ->
        case req.into do
          # No streaming: read the whole response eagerly.
          nil ->
            {req, run_finch_request(finch_req, finch_name, finch_options)}

          fun when is_function(fun, 2) ->
            finch_stream_into_fun(req, finch_req, finch_name, finch_options, fun)

          :legacy_self ->
            finch_stream_into_legacy_self(req, finch_req, finch_name, finch_options)

          :self ->
            finch_stream_into_self(req, finch_req, finch_name, finch_options)

          collectable ->
            finch_stream_into_collectable(req, finch_req, finch_name, finch_options, collectable)
        end
    end
  end

  # Streams the response through the user-supplied `fun`, threading
  # `{req, resp}` as the accumulator: status and headers are folded into
  # `resp` here, while data chunks are handed to `fun`, which controls
  # continuation via {:cont, _} / {:halt, _}.
  defp finch_stream_into_fun(req, finch_req, finch_name, finch_options, fun) do
    resp = Req.Response.new()

    fun = fn
      {:status, status}, {req, resp} ->
        {:cont, {req, %{resp | status: status}}}

      {:headers, fields}, {req, resp} ->
        resp =
          Enum.reduce(fields, resp, fn {name, value}, resp ->
            Req.Response.put_header(resp, name, value)
          end)

        {:cont, {req, resp}}

      {:data, data}, acc ->
        fun.({:data, data}, acc)

      {:trailers, fields}, {req, resp} ->
        fields = finch_fields_to_map(fields)
        resp = update_in(resp.trailers, &Map.merge(&1, fields))
        {:cont, {req, resp}}
    end

    case Finch.stream_while(finch_req, finch_name, {req, resp}, fun, finch_options) do
      {:ok, acc} ->
        acc

      # TODO: remove when we require Finch 0.20
      {:error, exception} ->
        {req, normalize_error(exception)}

      {:error, exception, _acc} ->
        {req, normalize_error(exception)}
    end
  end

  # Streams the response body into a Collectable. The accumulator is
  # `{collector_state, req, resp}`; collector_state is `nil` until the
  # status arrives. Only 200 responses go into the given collectable —
  # any other status is collected into a plain string body instead.
  defp finch_stream_into_collectable(req, finch_req, finch_name, finch_options, collectable) do
    resp = Req.Response.new()

    fun = fn
      {:status, 200}, {nil, req, resp} ->
        {acc, collector} = Collectable.into(collectable)
        {{acc, collector}, req, %{resp | status: 200}}

      {:status, status}, {nil, req, resp} ->
        {acc, collector} = Collectable.into("")
        {{acc, collector}, req, %{resp | status: status}}

      {:headers, fields}, {acc, req, resp} ->
        resp =
          Enum.reduce(fields, resp, fn {name, value}, resp ->
            Req.Response.put_header(resp, name, value)
          end)

        {acc, req, resp}

      {:data, data}, {{acc, collector}, req, resp} ->
        acc = collector.(acc, {:cont, data})
        {{acc, collector}, req, resp}

      {:trailers, fields}, {acc, req, resp} ->
        fields = finch_fields_to_map(fields)
        resp = update_in(resp.trailers, &Map.merge(&1, fields))
        {acc, req, resp}
    end

    case Finch.stream(finch_req, finch_name, {nil, req, resp}, fun, finch_options) do
      {:ok, {{acc, collector}, req, resp}} ->
        # Finalize the collectable and use it as the response body.
        acc = collector.(acc, :done)
        {req, %{resp | body: acc}}

      # TODO: remove when we require Finch 0.20
      {:error, exception} ->
        {req, normalize_error(exception)}

      # Failed before the status arrived: nothing to clean up.
      {:error, exception, {nil, _req, _resp}} ->
        {req, normalize_error(exception)}

      # Failed mid-stream: tell the collectable to discard its state.
      {:error, exception, {{acc, collector}, _req, _resp}} ->
        collector.(acc, :halt)
        {req, normalize_error(exception)}
    end
  end

  # Translates Mint/Finch exceptions into Req's standardized exceptions;
  # anything unrecognized is passed through unchanged.
  defp normalize_error(%Mint.TransportError{reason: reason}) do
    %Req.TransportError{reason: reason}
  end

  defp normalize_error(%Mint.HTTPError{module: Mint.HTTP1, reason: reason}) do
    %Req.HTTPError{protocol: :http1, reason: reason}
  end

  defp normalize_error(%Mint.HTTPError{module: Mint.HTTP2, reason: reason}) do
    %Req.HTTPError{protocol: :http2, reason: reason}
  end

  # NOTE(review): Finch.Error is mapped to :http2 here — presumably it only
  # originates from HTTP/2 pools; confirm against Finch.
  defp normalize_error(%Finch.Error{reason: reason}) do
    %Req.HTTPError{protocol: :http2, reason: reason}
  end

  defp normalize_error(error) do
    error
  end

  # `into: :legacy_self`: blocks until status and headers arrive, then
  # returns the response with the async handle stored in `req.async`
  # (body chunks keep arriving in the caller's mailbox).
  defp finch_stream_into_legacy_self(req, finch_req, finch_name, finch_options) do
    ref = Finch.async_request(finch_req, finch_name, finch_options)

    # Note: unlike finch_stream_into_self/4, an early {:error, _} message
    # here would crash on the pattern match rather than be returned.
    {:status, status} =
      receive do
        {^ref, message} ->
          message
      end

    headers =
      receive do
        {^ref, message} ->
          {:headers, headers} = message

          handle_finch_headers(headers)
      end

    async = %Req.Response.Async{
      pid: self(),
      ref: ref,
      stream_fun: &parse_message/2,
      cancel_fun: &cancel/1
    }

    req = put_in(req.async, async)
    resp = Req.Response.new(status: status, headers: headers)
    {req, resp}
  end

  # `into: :self`: like :legacy_self but the async handle becomes the
  # response *body*, and transport errors before/within status+headers are
  # returned as `{req, exception}` (the `with` falls through).
  defp finch_stream_into_self(req, finch_req, finch_name, finch_options) do
    ref = Finch.async_request(finch_req, finch_name, finch_options)

    with {:status, status} <- recv_status(req, ref),
         {:headers, headers} <- recv_headers(req, ref) do
      # TODO: handle trailers
      headers = handle_finch_headers(headers)

      async = %Req.Response.Async{
        pid: self(),
        ref: ref,
        stream_fun: &parse_message/2,
        cancel_fun: &cancel/1
      }

      resp = Req.Response.new(status: status, headers: headers, body: async)
      {req, resp}
    end
  end

  # Waits for the {:status, _} message; on error returns {req, exception},
  # which does not match the `with` clause above and is returned as-is.
  defp recv_status(req, ref) do
    receive do
      {^ref, {:status, status}} ->
        {:status, status}

      {^ref, {:error, exception}} ->
        {req, normalize_error(exception)}
    end
  end

  # Same contract as recv_status/2, for the {:headers, _} message.
  defp recv_headers(req, ref) do
    receive do
      {^ref, {:headers, headers}} ->
        {:headers, headers}

      {^ref, {:error, exception}} ->
        {req, normalize_error(exception)}
    end
  end

  # Non-streaming path: one blocking Finch.request/3 call.
  defp run_finch_request(finch_request, finch_name, finch_options) do
    case Finch.request(finch_request, finch_name, finch_options) do
      {:ok, response} ->
        Req.Response.new(response)

      {:error, exception} ->
        normalize_error(exception)
    end
  end

  # Copies :finch_private entries into the Finch request's private store.
  defp add_private_options(finch_request, nil) do
    finch_request
  end

  defp add_private_options(finch_request, private_options)
       when is_list(private_options) or is_map(private_options) do
    Enum.reduce(private_options, finch_request, fn {k, v}, acc_finch_req ->
      Finch.Request.put_private(acc_finch_req, k, v)
    end)
  end

  # Finch delivers headers as a list of pairs; in map mode convert them.
  if Req.MixProject.legacy_headers_as_lists?() do
    defp handle_finch_headers(headers), do: headers
  else
    defp handle_finch_headers(headers), do: finch_fields_to_map(headers)
  end

  # [{name, value}] -> %{name => [values]}, preserving per-name value order.
  defp finch_fields_to_map(fields) do
    Enum.reduce(fields, %{}, fn {name, value}, acc ->
      Map.update(acc, name, [value], &(&1 ++ [value]))
    end)
  end

  # Req.Response.Async stream_fun: translates raw Finch mailbox messages
  # into the normalized chunk format.
  defp parse_message(ref, {ref, {:data, data}}) do
    {:ok, [data: data]}
  end

  defp parse_message(ref, {ref, :done}) do
    {:ok, [:done]}
  end

  defp parse_message(ref, {ref, {:trailers, trailers}}) do
    {:ok, [trailers: trailers]}
  end

  defp parse_message(ref, {ref, {:error, reason}}) do
    {:error, reason}
  end

  defp parse_message(_, _) do
    :unknown
  end

  # Req.Response.Async cancel_fun: cancels the in-flight request and drains
  # any messages it already delivered to the mailbox.
  defp cancel(ref) do
    Finch.cancel_async_request(ref)
    clean_responses(ref)
    :ok
  end

  # Drains all pending {ref, _} messages without blocking.
  defp clean_responses(ref) do
    receive do
      {^ref, _} -> clean_responses(ref)
    after
      0 -> :ok
    end
  end

  # Picks the Finch instance to use:
  #   * :finch option — user-supplied instance (incompatible with custom
  #     connection options);
  #   * :connect_options / :inet6 — a dynamically started instance, one per
  #     distinct pool configuration (named by hashing the pool options);
  #   * otherwise — the default Req.Finch instance.
  defp finch_name(request) do
    custom_options? =
      Map.has_key?(request.options, :connect_options) or Map.has_key?(request.options, :inet6)

    cond do
      name = request.options[:finch] ->
        if custom_options? do
          raise ArgumentError, "cannot set both :finch and :connect_options"
        else
          name
        end

      custom_options? ->
        pool_options = pool_options(request.options)

        name =
          pool_options
          |> :erlang.term_to_binary()
          |> :erlang.md5()
          |> Base.url_encode64(padding: false)

        name = Module.concat(Req.FinchSupervisor, "Pool_#{name}")

        # Racing starts are fine: :already_started means another request
        # created the same pool first.
        case DynamicSupervisor.start_child(
               Req.FinchSupervisor,
               {Finch, name: name, pools: %{default: pool_options}}
             ) do
          {:ok, _} ->
            name

          {:error, {:already_started, _}} ->
            name
        end

      true ->
        Req.Finch
    end
  end

  @doc """
  Returns Finch pool options for the given Req `options`.
  """
  def pool_options(options) when is_map(options) do
    connect_options = options[:connect_options] || []
    inet6_options = options |> Map.take([:inet6]) |> Enum.to_list()
    pool_options = options |> Map.take([:pool_max_idle_time]) |> Enum.to_list()

    # Raises on unknown :connect_options keys.
    Req.Request.validate_options(
      connect_options,
      MapSet.new([
        :timeout,
        :protocols,
        :transport_opts,
        :proxy_headers,
        :proxy,
        :client_settings,
        :hostname,

        # TODO: Remove on Req v1.0
        :protocol
      ])
    )

    # User-supplied :transport_opts take precedence over derived ones.
    transport_opts =
      Keyword.merge(
        Keyword.take(connect_options, [:timeout]) ++ inet6_options,
        Keyword.get(connect_options, :transport_opts, [])
      )

    conn_opts =
      Keyword.take(connect_options, [:hostname, :proxy, :proxy_headers, :client_settings]) ++
        if transport_opts != [] do
          [transport_opts: transport_opts]
        else
          []
        end

    protocols =
      cond do
        protocols = connect_options[:protocols] ->
          protocols

        protocol = connect_options[:protocol] ->
          IO.warn([
            "setting `connect_options: [protocol: protocol]` is deprecated, ",
            "use `connect_options: [protocols: protocols]` instead"
          ])

          [protocol]

        true ->
          [:http1]
      end

    pool_options ++
      [protocols: protocols] ++
      if conn_opts != [] do
        [conn_opts: conn_opts]
      else
        []
      end
  end

  # Keyword options are normalized through Req.new/1 first.
  def pool_options(options) when is_list(options) do
    pool_options(Req.new(options).options)
  end
end
436 |
--------------------------------------------------------------------------------
/lib/req/http_error.ex:
--------------------------------------------------------------------------------
defmodule Req.HTTPError do
  @moduledoc """
  Represents an HTTP protocol error.

  This is a standardised exception that all Req adapters should use for HTTP-protocol-related
  errors.

  This exception is based on `Mint.HTTPError`.
  """

  defexception [:protocol, :reason]

  # Mint's format_error/1 functions only know Mint-generated reasons and
  # raise FunctionClauseError for anything else, in which case we fall back
  # to a generic message built from the raw reason.
  @impl true
  def message(%{protocol: :http1, reason: reason}) do
    try do
      Mint.HTTP1.format_error(reason)
    rescue
      FunctionClauseError -> "http1 error: #{inspect(reason)}"
    end
  end

  def message(%{protocol: :http2, reason: reason}) do
    try do
      Mint.HTTP2.format_error(reason)
    rescue
      FunctionClauseError -> "http2 error: #{inspect(reason)}"
    end
  end
end
28 |
--------------------------------------------------------------------------------
/lib/req/response.ex:
--------------------------------------------------------------------------------
defmodule Req.Response do
  @moduledoc """
  The response struct.

  Fields:

    * `:status` - the HTTP status code.

    * `:headers` - the HTTP response headers. The header names should be downcased.
      See also "Headers" section in `Req` module documentation.

    * `:body` - the HTTP response body.

    * `:trailers` - the HTTP response trailers. The trailer names must be downcased.

    * `:private` - a map reserved for libraries and frameworks to use.
      Prefix the keys with the name of your project to avoid any future
      conflicts. Only accepts `t:atom/0` keys.
  """

  @type t() :: %__MODULE__{
          status: non_neg_integer(),
          headers: %{optional(binary()) => [binary()]},
          body: binary() | %Req.Response.Async{} | term(),
          trailers: %{optional(binary()) => [binary()]},
          private: map()
        }

  defstruct status: 200,
            headers: Req.Fields.new([]),
            body: "",
            trailers: Req.Fields.new([]),
            private: %{}

  @doc """
  Returns a new response.

  Expects a keyword list, map, or struct containing the response keys.

  ## Example

      iex> Req.Response.new(status: 200, body: "body")
      %Req.Response{status: 200, headers: %{}, body: "body"}

      iex> finch_response = %Finch.Response{status: 200, headers: [{"content-type", "text/html"}]}
      iex> Req.Response.new(finch_response)
      %Req.Response{status: 200, headers: %{"content-type" => ["text/html"]}, body: ""}

  """
  @spec new(options :: keyword() | map() | struct()) :: t()
  def new(options \\ [])

  # Matches plain maps and structs alike (e.g. %Finch.Response{}).
  def new(%{} = options) do
    # Headers/trailers may arrive as lists with duplicates (e.g. from
    # adapters); they are aggregated without normalizing names or values.
    options =
      options
      |> Map.take([:status, :headers, :body, :trailers])
      |> Map.update(
        :headers,
        Req.Fields.new([]),
        &Req.Fields.new_without_normalize_with_duplicates/1
      )
      |> Map.update(
        :trailers,
        Req.Fields.new([]),
        &Req.Fields.new_without_normalize_with_duplicates/1
      )

    struct!(__MODULE__, options)
  end

  def new(options) when is_list(options) do
    new(Map.new(options))
  end

  @doc """
  Builds or updates a response with JSON body.

  ## Example

      iex> Req.Response.json(%{hello: 42})
      %Req.Response{
        status: 200,
        headers: %{"content-type" => ["application/json"]},
        body: ~s|{"hello":42}|
      }

      iex> resp = Req.Response.new()
      iex> Req.Response.json(resp, %{hello: 42})
      %Req.Response{
        status: 200,
        headers: %{"content-type" => ["application/json"]},
        body: ~s|{"hello":42}|
      }

  If the response already contains a 'content-type' header, it is kept as is:

      iex> Req.Response.new()
      iex> |> Req.Response.put_header("content-type", "application/vnd.api+json; charset=utf-8")
      iex> |> Req.Response.json(%{hello: 42})
      %Req.Response{
        status: 200,
        headers: %{"content-type" => ["application/vnd.api+json; charset=utf-8"]},
        body: ~s|{"hello":42}|
      }
  """
  @spec json(t(), body :: term()) :: t()
  def json(response \\ new(), body) do
    response =
      update_in(response.headers, &Req.Fields.put_new(&1, "content-type", "application/json"))

    Map.replace!(response, :body, Jason.encode!(body))
  end

  @doc """
  Gets the value for a specific private `key`.
  """
  @spec get_private(t(), key :: atom(), default :: term()) :: term()
  def get_private(%Req.Response{} = response, key, default \\ nil) when is_atom(key) do
    Map.get(response.private, key, default)
  end

  @doc """
  Assigns a private `key` to `value`.
  """
  @spec put_private(t(), key :: atom(), value :: term()) :: t()
  def put_private(%Req.Response{} = response, key, value) when is_atom(key) do
    put_in(response.private[key], value)
  end

  @doc """
  Updates private `key` with the given function.

  If `key` is present in response private map then the existing value is passed to `fun` and its
  result is used as the updated value of `key`. If `key` is not present, `default` is inserted
  as the value of `key`. The default value will not be passed through the update function.

  ## Examples

      iex> resp = %Req.Response{private: %{a: 1}}
      iex> Req.Response.update_private(resp, :a, 11, & &1 + 1).private
      %{a: 2}
      iex> Req.Response.update_private(resp, :b, 11, & &1 + 1).private
      %{a: 1, b: 11}
  """
  # Spec fix: the updater receives the current *value* (a term), not an atom.
  @spec update_private(t(), key :: atom(), initial :: term(), (term() -> term())) :: t()
  def update_private(%Req.Response{} = response, key, initial, fun)
      when is_atom(key) and is_function(fun, 1) do
    update_in(response.private, &Map.update(&1, key, initial, fun))
  end

  @doc """
  Returns the values of the header specified by `name`.

  See also "Headers" section in `Req` module documentation.

  ## Examples

      iex> Req.Response.get_header(response, "content-type")
      ["application/json"]
  """
  @spec get_header(t(), binary()) :: [binary()]
  def get_header(%Req.Response{} = resp, name) when is_binary(name) do
    Req.Fields.get_values(resp.headers, name)
  end

  @doc """
  Adds a new response header `name` if not present, otherwise replaces the
  previous value of that header with `value`.

  See also "Headers" section in `Req` module documentation.

  ## Examples

      iex> resp = Req.Response.put_header(%Req.Response{}, "content-type", "application/json")
      iex> resp.headers
      %{"content-type" => ["application/json"]}
  """
  @spec put_header(t(), binary(), binary()) :: t()
  def put_header(%Req.Response{} = resp, name, value) when is_binary(name) and is_binary(value) do
    update_in(resp.headers, &Req.Fields.put(&1, name, value))
  end

  @doc """
  Deletes the header given by `name`.

  All occurrences of the header are deleted, in case the header is repeated multiple times.

  See also "Headers" section in `Req` module documentation.

  ## Examples

      iex> Req.Response.get_header(resp, "cache-control")
      ["max-age=600", "no-transform"]
      iex> resp = Req.Response.delete_header(resp, "cache-control")
      iex> Req.Response.get_header(resp, "cache-control")
      []

  """
  # Spec added for consistency with the other header functions.
  @spec delete_header(t(), binary()) :: t()
  def delete_header(%Req.Response{} = resp, name) when is_binary(name) do
    update_in(resp.headers, &Req.Fields.delete(&1, name))
  end

  @doc """
  Returns the `retry-after` header delay value or nil if not found.
  """
  @spec get_retry_after(t()) :: integer() | nil
  def get_retry_after(response) do
    # A valid response has at most one retry-after value; if a server sends
    # several, use the first instead of raising a CaseClauseError.
    case get_header(response, "retry-after") do
      [delay | _] ->
        retry_delay_in_ms(delay)

      [] ->
        nil
    end
  end

  # retry-after is either delay-seconds or an HTTP date; both are converted
  # to milliseconds from now (clamped at 0 for dates in the past).
  defp retry_delay_in_ms(delay_value) do
    case Integer.parse(delay_value) do
      {seconds, ""} ->
        :timer.seconds(seconds)

      :error ->
        delay_value
        |> Req.Utils.parse_http_date!()
        |> DateTime.diff(DateTime.utc_now(), :millisecond)
        |> max(0)
    end
  end
end
229 |
--------------------------------------------------------------------------------
/lib/req/response_async.ex:
--------------------------------------------------------------------------------
defmodule Req.Response.Async do
  @moduledoc """
  Asynchronous response body.

  This is the `response.body` when making a request with `into: :self`, that is,
  streaming response body chunks to the current process mailbox.

  This struct implements the `Enumerable` protocol where each element is a body chunk received
  from the current process mailbox. HTTP Trailer fields are ignored.

  If the request is sent using HTTP/1, an extra process is spawned to consume messages from the
  underlying socket. On both HTTP/1 and HTTP/2 the messages are sent to the current process as
  soon as they arrive, as a firehose. If you wish to maximize request rate or have more control
  over how messages are streamed, use `into: fun` or `into: collectable` instead.

  **Note:** This feature is currently experimental and it may change in future releases.

  ## Examples

      iex> resp = Req.get!("https://reqbin.org/ndjson?delay=1000", into: :self)
      iex> resp.body
      #Req.Response.Async<...>
      iex> Enum.each(resp.body, &IO.puts/1)
      # {"id":0}
      # {"id":1}
      # {"id":2}
      :ok
  """

  # Fields are adapter-supplied:
  #   :pid        - the process that made the request (the only one allowed to read)
  #   :ref        - tags the mailbox messages belonging to this response
  #   :stream_fun - (ref, message) -> {:ok, chunks} | {:error, e} | :unknown
  #   :cancel_fun - (ref) -> :ok; cancels the request and drains the mailbox
  @derive {Inspect, only: []}
  defstruct [:pid, :ref, :stream_fun, :cancel_fun]

  defimpl Enumerable do
    # A mailbox stream has no known size and no efficient membership/slicing.
    def count(_async), do: {:error, __MODULE__}

    def member?(_async, _value), do: {:error, __MODULE__}

    def slice(_async), do: {:error, __MODULE__}

    # Halting the enumeration cancels the in-flight request.
    def reduce(async, {:halt, acc}, _fun) do
      cancel(async)
      {:halted, acc}
    end

    def reduce(async, {:suspend, acc}, fun) do
      {:suspended, acc, &reduce(async, &1, fun)}
    end

    def reduce(async, {:cont, acc}, fun) do
      # Chunks arrive in the mailbox of the process that made the request,
      # so only that process may enumerate.
      if async.pid != self() do
        raise "expected to read body chunk in the process #{inspect(async.pid)} which made the request, got: #{inspect(self())}"
      end

      ref = async.ref

      # Blocks until the next message for this response arrives; the
      # adapter's stream_fun normalizes it.
      receive do
        {^ref, _} = message ->
          case async.stream_fun.(async.ref, message) do
            {:ok, [data: data]} ->
              # If the consumer crashes mid-stream, cancel the request
              # before re-raising so the connection isn't leaked.
              result =
                try do
                  fun.(data, acc)
                rescue
                  e ->
                    cancel(async)
                    reraise e, __STACKTRACE__
                end

              reduce(async, result, fun)

            {:ok, [:done]} ->
              {:done, acc}

            # Trailers are ignored (see moduledoc); keep reading.
            {:ok, [trailers: _trailers]} ->
              reduce(async, {:cont, acc}, fun)

            {:error, e} ->
              raise e

            other ->
              raise "unexpected message: #{inspect(other)}"
          end
      end
    end

    defp cancel(async) do
      async.cancel_fun.(async.ref)
    end
  end
end
91 |
--------------------------------------------------------------------------------
/lib/req/test.ex:
--------------------------------------------------------------------------------
1 | defmodule Req.Test do
2 | @moduledoc """
3 | Req testing conveniences.
4 |
5 | Req is composed of:
6 |
7 | * `Req` - the high-level API
8 |
9 | * `Req.Request` - the low-level API and the request struct
10 |
11 | * `Req.Steps` - the collection of built-in steps
12 |
13 | * `Req.Test` - the testing conveniences (you're here!)
14 |
15 | Req already has built-in support for different variants of stubs via `:plug`, `:adapter`,
16 | and (indirectly) `:base_url` options. With this module you can:
17 |
18 | * Create request stubs using [`Req.Test.stub(name, plug)`](`stub/2`) and mocks
19 | using [`Req.Test.expect(name, count, plug)`](`expect/3`). Both can be used in concurrent
20 | tests.
21 |
22 | * Configure Req to run requests through mocks/stubs by setting `plug: {Req.Test, name}`.
23 | This works because `Req.Test` itself is a plug whose job is to fetch the mocks/stubs under
24 | `name`.
25 |
26 | * Easily create JSON responses with [`Req.Test.json(conn, body)`](`json/2`),
27 | HTML responses with [`Req.Test.html(conn, body)`](`html/2`), and
28 | text responses with [`Req.Test.text(conn, body)`](`text/2`).
29 |
30 | * Simulate network errors with [`Req.Test.transport_error(conn, reason)`](`transport_error/2`).
31 |
32 | Mocks and stubs are using the same ownership model of
33 | [nimble_ownership](https://hex.pm/packages/nimble_ownership), also used by
34 | [Mox](https://hex.pm/packages/mox). This allows `Req.Test` to be used in concurrent tests.
35 |
36 | ## Example
37 |
38 | Imagine we're building an app that displays weather for a given location using an HTTP weather
39 | service:
40 |
41 | defmodule MyApp.Weather do
42 | def get_rating(location) do
43 | case get_temperature(location) do
44 | {:ok, %{status: 200, body: %{"celsius" => celsius}}} ->
45 | cond do
46 | celsius < 18.0 -> {:ok, :too_cold}
47 | celsius < 30.0 -> {:ok, :nice}
48 | true -> {:ok, :too_hot}
49 | end
50 |
51 | _ ->
52 | :error
53 | end
54 | end
55 |
56 | def get_temperature(location) do
57 | [
58 | base_url: "https://weather-service",
59 | params: [location: location]
60 | ]
61 | |> Keyword.merge(Application.get_env(:myapp, :weather_req_options, []))
62 | |> Req.request()
63 | end
64 | end
65 |
66 | We configure it for production:
67 |
68 | # config/runtime.exs
69 | config :myapp, weather_req_options: [
70 | auth: {:bearer, System.fetch_env!("MYAPP_WEATHER_API_KEY")}
71 | ]
72 |
73 | In tests, instead of hitting the network, we make the request against
74 | a [plug](`Req.Steps.run_plug/1`) _stub_ named `MyApp.Weather`:
75 |
76 | # config/test.exs
77 | config :myapp, weather_req_options: [
78 | plug: {Req.Test, MyApp.Weather}
79 | ]
80 |
81 | Now we can control our stubs **in concurrent tests**:
82 |
83 | use ExUnit.Case, async: true
84 |
85 | test "nice weather" do
86 | Req.Test.stub(MyApp.Weather, fn conn ->
87 | Req.Test.json(conn, %{"celsius" => 25.0})
88 | end)
89 |
90 | assert MyApp.Weather.get_rating("Krakow, Poland") == {:ok, :nice}
91 | end
92 |
93 | ## Concurrency and Allowances
94 |
95 | The example above works in concurrent tests because `MyApp.Weather.get_rating/1` calls
96 | directly to `Req.request/1` *in the same process*. It also works in many cases where the
97 | request happens in a spawned process, such as a `Task`, `GenServer`, and more.
98 |
99 | However, if you are encountering issues with stubs not being available in spawned processes,
100 | it's likely that you'll need **explicit allowances**. For example, if
101 | `MyApp.Weather.get_rating/1` was calling `Req.request/1` in a process spawned with `spawn/1`,
102 | the stub would not be available in the spawned process:
103 |
104 | # With code like this, the stub would not be available in the spawned task:
105 | def get_rating_async(location) do
106 | spawn(fn -> get_rating(location) end)
107 | end
108 |
109 | To make stubs defined in the test process available in other processes, you can use
110 | `allow/3`. For example, imagine that the call to `MyApp.Weather.get_rating/1`
111 | was happening in a spawned GenServer:
112 |
113 | test "nice weather" do
114 | {:ok, pid} = start_gen_server(...)
115 |
116 | Req.Test.stub(MyApp.Weather, fn conn ->
117 | Req.Test.json(conn, %{"celsius" => 25.0})
118 | end)
119 |
120 | Req.Test.allow(MyApp.Weather, self(), pid)
121 |
122 | assert get_weather(pid, "Krakow, Poland") == {:ok, :nice}
123 | end
124 |
125 | ## Broadway
126 |
127 | If you're using `Req.Test` with [Broadway](https://hex.pm/broadway), you may need to use
128 | `allow/3` to make stubs available in the Broadway processors. A great way to do that is
129 | to hook into the [Telemetry](https://hex.pm/telemetry) events that Broadway publishes to
130 | manually allow the processors and batch processors to access the stubs. This approach is
131 | similar to what is [documented in Broadway
132 | itself](https://hexdocs.pm/broadway/Broadway.html#module-testing-with-ecto).
133 |
134 | First, you should add the test PID (which is allowed to use the Req stub) to the metadata
135 | for the test events you're publishing:
136 |
137 | Broadway.test_message(MyApp.Pipeline, message, metadata: %{req_stub_owner: self()})
138 |
139 | Then, you'll need to define a test helper to hook into the Telemetry events. For example,
140 | in your `test/test_helper.exs` file:
141 |
142 | defmodule BroadwayReqStubs do
143 | def attach(stub) do
144 | events = [
145 | [:broadway, :processor, :start],
146 | [:broadway, :batch_processor, :start],
147 | ]
148 |
149 | :telemetry.attach_many({__MODULE__, stub}, events, &__MODULE__.handle_event/4, %{stub: stub})
150 | end
151 |
152 | def handle_event(_event_name, _event_measurement, %{messages: messages}, %{stub: stub}) do
153 | with [%Broadway.Message{metadata: %{req_stub_owner: pid}} | _] <- messages do
154 | :ok = Req.Test.allow(stub, pid, self())
155 | end
156 |
157 | :ok
158 | end
159 | end
160 |
161 | Last but not least, attach the helper in your `test/test_helper.exs`:
162 |
163 | BroadwayReqStubs.attach(MyStub)
164 |
165 | """
166 |
167 | require Logger
168 |
169 | @typep name() :: term()
170 |
171 | if Code.ensure_loaded?(Plug.Conn) do
172 | @typep plug() ::
173 | module()
174 | | {module(), term()}
175 | | (Plug.Conn.t() -> Plug.Conn.t())
176 | | (Plug.Conn.t(), term() -> Plug.Conn.t())
177 | else
178 | @typep plug() ::
179 | module()
180 | | {module, term()}
181 | | (conn :: term() -> term())
182 | | (conn :: term(), term() -> term())
183 | end
184 |
185 | @ownership Req.Test.Ownership
186 |
  @doc """
  Sends JSON response.

  ## Examples

      iex> plug = fn conn ->
      ...>   Req.Test.json(conn, %{celsius: 25.0})
      ...> end
      iex>
      iex> resp = Req.get!(plug: plug)
      iex> resp.headers["content-type"]
      ["application/json; charset=utf-8"]
      iex> resp.body
      %{"celsius" => 25.0}

  """
  if Code.ensure_loaded?(Plug.Test) do
    @spec json(Plug.Conn.t(), term()) :: Plug.Conn.t()
    def json(%Plug.Conn{} = conn, data) do
      send_resp(conn, conn.status || 200, "application/json", Jason.encode_to_iodata!(data))
    end

    # Shared by json/2, html/2, and text/2: sends `body`, preferring a status
    # already set on the conn over `default_status`, and sets the content type
    # unless a "content-type" response header is already present.
    defp send_resp(conn, default_status, default_content_type, body) do
      conn
      |> ensure_resp_content_type(default_content_type)
      |> Plug.Conn.send_resp(conn.status || default_status, body)
    end

    # Prepends a "content-type" header with a utf-8 charset, unless one is
    # already set — an existing header always wins.
    defp ensure_resp_content_type(%Plug.Conn{resp_headers: resp_headers} = conn, content_type) do
      if List.keyfind(resp_headers, "content-type", 0) do
        conn
      else
        content_type = content_type <> "; charset=utf-8"
        %{conn | resp_headers: [{"content-type", content_type} | resp_headers]}
      end
    end
  else
    # Fallback compiled when Plug is not a dependency: log instructions, then raise.
    def json(_conn, _data) do
      Logger.error("""
      Could not find plug dependency.

      Please add :plug to your dependencies:

          {:plug, "~> 1.0"}
      """)

      raise "missing plug dependency"
    end
  end
236 |
  @doc """
  Sends HTML response.

  ## Examples

      iex> plug = fn conn ->
      ...>   Req.Test.html(conn, "<h1>Hello, World!</h1>")
      ...> end
      iex>
      iex> resp = Req.get!(plug: plug)
      iex> resp.headers["content-type"]
      ["text/html; charset=utf-8"]
      iex> resp.body
      "<h1>Hello, World!</h1>"

  """
  if Code.ensure_loaded?(Plug.Test) do
    @spec html(Plug.Conn.t(), iodata()) :: Plug.Conn.t()
    def html(%Plug.Conn{} = conn, data) do
      # Delegates to the shared send_resp/4 helper (defined next to json/2):
      # uses conn.status when set, defaulting to 200.
      send_resp(conn, conn.status || 200, "text/html", data)
    end
  else
    # Fallback compiled when Plug is not a dependency: log instructions, then raise.
    def html(_conn, _data) do
      Logger.error("""
      Could not find plug dependency.

      Please add :plug to your dependencies:

          {:plug, "~> 1.0"}
      """)

      raise "missing plug dependency"
    end
  end
271 |
  @doc """
  Sends text response.

  ## Examples

      iex> plug = fn conn ->
      ...>   Req.Test.text(conn, "Hello, World!")
      ...> end
      iex>
      iex> resp = Req.get!(plug: plug)
      iex> resp.headers["content-type"]
      ["text/plain; charset=utf-8"]
      iex> resp.body
      "Hello, World!"

  """
  if Code.ensure_loaded?(Plug.Test) do
    @spec text(Plug.Conn.t(), iodata()) :: Plug.Conn.t()
    def text(%Plug.Conn{} = conn, data) do
      # Delegates to the shared send_resp/4 helper (defined next to json/2):
      # uses conn.status when set, defaulting to 200.
      send_resp(conn, conn.status || 200, "text/plain", data)
    end
  else
    # Fallback compiled when Plug is not a dependency: log instructions, then raise.
    def text(_conn, _data) do
      Logger.error("""
      Could not find plug dependency.

      Please add :plug to your dependencies:

          {:plug, "~> 1.0"}
      """)

      raise "missing plug dependency"
    end
  end
306 |
  @doc """
  Simulates a network transport error.

  ## Examples

      iex> plug = fn conn ->
      ...>   Req.Test.transport_error(conn, :timeout)
      ...> end
      iex>
      iex> Req.get(plug: plug, retry: false)
      {:error, %Req.TransportError{reason: :timeout}}

  """
  @doc since: "0.5.0"
  def transport_error(conn, reason)

  if Code.ensure_loaded?(Plug.Conn) do
    @spec transport_error(Plug.Conn.t(), reason :: atom()) :: Plug.Conn.t()
    def transport_error(%Plug.Conn{} = conn, reason) do
      validate_transport_error!(reason)
      exception = Req.TransportError.exception(reason: reason)
      # The exception is stashed in conn.private for the plug-running step to
      # pick up and surface as a {:error, exception} result.
      put_in(conn.private[:req_test_exception], exception)
    end

    # Known non-ssl transport reasons are whitelisted explicitly.
    defp validate_transport_error!(:protocol_not_negotiated), do: :ok
    defp validate_transport_error!({:bad_alpn_protocol, _}), do: :ok
    defp validate_transport_error!(:closed), do: :ok
    defp validate_transport_error!(:timeout), do: :ok

    # Anything else must be a reason :ssl knows how to format; :ssl.format_error/1
    # returns a charlist starting with ~c"Unexpected error:" for unknown reasons.
    defp validate_transport_error!(reason) do
      case :ssl.format_error(reason) do
        ~c"Unexpected error:" ++ _ ->
          raise ArgumentError, "unexpected Req.TransportError reason: #{inspect(reason)}"

        _ ->
          :ok
      end
    end
  else
    # Fallback compiled when Plug is not a dependency: log instructions, then raise.
    def transport_error(_conn, _reason) do
      Logger.error("""
      Could not find plug dependency.

      Please add :plug to your dependencies:

          {:plug, "~> 1.0"}
      """)

      raise "missing plug dependency"
    end
  end
358 |
359 | @doc false
360 | @deprecated "Don't manually fetch stubs. See the documentation for Req.Test instead."
361 | def stub(name) do
362 | __fetch_plug__(name)
363 | end
364 |
  # Resolves the plug registered under `name` for the calling process (or one of
  # its $callers). Expectations queued via expect/3 are consumed one at a time,
  # in order; once the queue is empty, the stub/2 value (if any) is used for
  # every subsequent call. Raises when neither is available, or when no owner
  # for `name` can be found at all.
  def __fetch_plug__(name) do
    case Req.Test.Ownership.fetch_owner(@ownership, callers(), name) do
      # :shared_owner is returned in shared mode; :ok in private mode.
      {tag, owner} when is_pid(owner) and tag in [:ok, :shared_owner] ->
        result =
          Req.Test.Ownership.get_and_update(@ownership, owner, name, fn
            # Pop the next pending expectation, if any.
            %{expectations: [value | rest]} = map ->
              {{:ok, value}, put_in(map[:expectations], rest)}

            # No pending expectations: fall back to the stub, leaving it in place.
            %{stub: value} = map ->
              {{:ok, value}, map}

            # Expectations exhausted and no stub registered.
            %{expectations: []} = map ->
              {{:error, :no_expectations_and_no_stub}, map}
          end)

        case result do
          {:ok, {:ok, value}} ->
            value

          {:ok, {:error, :no_expectations_and_no_stub}} ->
            raise "no mock or stub for #{inspect(name)}"
        end

      :error ->
        raise "cannot find mock/stub #{inspect(name)} in process #{inspect(self())}"
    end
  end
392 |
  # A "plug" is any of the four accepted shapes: fun(conn), fun(conn, options),
  # a module, or a {module, options} tuple (first element must be the module atom).
  defguardp is_plug(value)
            when is_function(value, 1) or
                   is_function(value, 2) or
                   is_atom(value) or
                   (is_tuple(value) and tuple_size(value) == 2 and is_atom(elem(value, 0)))
398 |
399 | @doc """
400 | Creates a request stub with the given `name` and `plug`.
401 |
402 | Req allows running requests against _plugs_ (instead of over the network) using the
403 | [`:plug`](`Req.Steps.run_plug/1`) option. However, passing the `:plug` value throughout the
404 | system can be cumbersome. Instead, you can tell Req to find plugs by `name` by setting
405 | `plug: {Req.Test, name}`, and register plug stubs for that `name` by calling
406 | `Req.Test.stub(name, plug)`. In other words, multiple concurrent tests can register test stubs
407 | under the same `name`, and when Req makes the request, it will find the appropriate
408 | implementation, even when invoked from different processes than the test process.
409 |
410 | The `name` can be any term.
411 |
412 | The `plug` can be one of:
413 |
414 | * A _function_ plug: a `fun(conn)` or `fun(conn, options)` function that takes a
415 | `Plug.Conn` and returns a `Plug.Conn`.
416 |
417 | * A _module_ plug: a `module` name or a `{module, options}` tuple.
418 |
419 | ## Examples
420 |
421 | iex> Req.Test.stub(MyStub, fn conn ->
422 | ...> send(self(), :req_happened)
423 | ...> Req.Test.json(conn, %{})
424 | ...> end)
425 | :ok
426 | iex> Req.get!(plug: {Req.Test, MyStub}).body
427 | %{}
428 | iex> receive do
429 | ...> :req_happened -> :ok
430 | ...> end
431 | :ok
432 |
433 | """
434 | @doc type: :mock
435 | @spec stub(name(), plug()) :: :ok
436 | def stub(name, plug) when is_plug(plug) do
437 | {:ok, :ok} =
438 | Req.Test.Ownership.get_and_update(@ownership, self(), name, fn map_or_nil ->
439 | {:ok, put_in(map_or_nil || %{}, [:stub], plug)}
440 | end)
441 |
442 | :ok
443 | end
444 |
445 | @doc """
446 | Creates a request expectation with the given `name` and `plug`, expected to be fetched at
447 | most `n` times, **in order**.
448 |
449 | This function allows you to expect a `n` number of request and handle them **in order** via the
450 | given `plug`. It is safe to use in concurrent tests. If you fetch the value under `name` more
451 | than `n` times, this function raises a `RuntimeError`.
452 |
453 | The `name` can be any term.
454 |
455 | The `plug` can be one of:
456 |
457 | * A _function_ plug: a `fun(conn)` or `fun(conn, options)` function that takes a
458 | `Plug.Conn` and returns a `Plug.Conn`.
459 |
460 | * A _module_ plug: a `module` name or a `{module, options}` tuple.
461 |
462 | See `stub/2` and module documentation for more information.
463 |
464 | ## Examples
465 |
466 | Let's simulate a server that is having issues: on the first request it is not responding
467 | and on the following two requests it returns an HTTP 500. Only on the third request it returns
468 | an HTTP 200. Req by default automatically retries transient errors (using `Req.Steps.retry/1`)
469 | so it will make multiple requests exercising all of our request expectations:
470 |
471 | iex> Req.Test.expect(MyStub, &Req.Test.transport_error(&1, :econnrefused))
472 | iex> Req.Test.expect(MyStub, 2, &Plug.Conn.send_resp(&1, 500, "internal server error"))
473 | iex> Req.Test.expect(MyStub, &Plug.Conn.send_resp(&1, 200, "ok"))
474 | iex> Req.get!(plug: {Req.Test, MyStub}).body
475 | # 15:57:06.309 [warning] retry: got exception, will retry in 1000ms, 3 attempts left
476 | # 15:57:06.309 [warning] ** (Req.TransportError) connection refused
477 | # 15:57:07.310 [warning] retry: got response with status 500, will retry in 2000ms, 2 attempts left
478 | # 15:57:09.311 [warning] retry: got response with status 500, will retry in 4000ms, 1 attempt left
479 | "ok"
480 |
481 | iex> Req.request!(plug: {Req.Test, MyStub})
482 | ** (RuntimeError) no mock or stub for MyStub
483 |
484 | """
485 | @doc since: "0.4.15"
486 | @doc type: :mock
487 | @spec expect(name(), pos_integer(), plug()) :: name()
488 | def expect(name, n \\ 1, plug) when is_integer(n) and n > 0 do
489 | plugs = List.duplicate(plug, n)
490 |
491 | {:ok, :ok} =
492 | Req.Test.Ownership.get_and_update(@ownership, self(), name, fn map_or_nil ->
493 | {:ok, Map.update(map_or_nil || %{}, :expectations, plugs, &(&1 ++ plugs))}
494 | end)
495 |
496 | name
497 | end
498 |
499 | @doc """
500 | Allows `pid_to_allow` to access `name` provided that `owner` is already allowed.
501 | """
502 | @doc type: :mock
503 | @spec allow(name(), pid(), pid() | (-> pid())) :: :ok | {:error, Exception.t()}
504 | def allow(name, owner, pid_to_allow) when is_pid(owner) do
505 | Req.Test.Ownership.allow(@ownership, owner, pid_to_allow, name)
506 | end
507 |
508 | @doc """
509 | Sets the `Req.Test` mode to "global", meaning that the stubs are shared across all tests
510 | and cannot be used concurrently.
511 | """
512 | @doc since: "0.5.0"
513 | @doc type: :mock
514 | @spec set_req_test_to_shared(ex_unit_context :: term()) :: :ok
515 | def set_req_test_to_shared(_context \\ %{}) do
516 | Req.Test.Ownership.set_mode_to_shared(@ownership, self())
517 | end
518 |
519 | @doc """
520 | Sets the `Req.Test` mode to "private", meaning that stubs can be shared across
521 | tests concurrently.
522 | """
523 | @doc type: :mock
524 | @doc since: "0.5.0"
525 | @spec set_req_test_to_private(ex_unit_context :: term()) :: :ok
526 | def set_req_test_to_private(_context \\ %{}) do
527 | Req.Test.Ownership.set_mode_to_private(@ownership)
528 | end
529 |
530 | @doc """
531 | Sets the `Req.Test` mode based on the given `ExUnit` context.
532 |
533 | This works as a ExUnit callback:
534 |
535 | setup :set_req_test_from_context
536 |
537 | """
538 | @doc since: "0.5.0"
539 | @doc type: :mock
540 | @spec set_req_test_from_context(ex_unit_context :: term()) :: :ok
541 | def set_req_test_from_context(_context \\ %{})
542 |
543 | def set_req_test_from_context(%{async: true} = context), do: set_req_test_to_private(context)
544 | def set_req_test_from_context(context), do: set_req_test_to_shared(context)
545 |
546 | @doc """
547 | Sets a ExUnit callback to verify the expectations on exit.
548 |
549 | Similar to calling `verify!/0` at the end of your test.
550 | """
551 | @doc since: "0.5.0"
552 | @doc type: :mock
553 | @spec verify_on_exit!(term()) :: :ok
554 | def verify_on_exit!(_context \\ %{}) do
555 | pid = self()
556 | Req.Test.Ownership.set_owner_to_manual_cleanup(@ownership, pid)
557 |
558 | ExUnit.Callbacks.on_exit(Mox, fn ->
559 | verify(pid, :all)
560 | Req.Test.Ownership.cleanup_owner(@ownership, pid)
561 | end)
562 | end
563 |
564 | @doc """
565 | Verifies that all the plugs expected to be executed within any scope have been executed.
566 | """
567 | @doc since: "0.5.0"
568 | @doc type: :mock
569 | @spec verify!() :: :ok
570 | def verify! do
571 | verify(self(), :all)
572 | end
573 |
574 | @doc """
575 | Verifies that all the plugs expected to be executed within the scope of `name` have been
576 | executed.
577 | """
578 | @doc type: :mock
579 | @doc since: "0.5.0"
580 | @spec verify!(name()) :: :ok
581 | def verify!(name) do
582 | verify(self(), name)
583 | end
584 |
585 | defp verify(owner_pid, mock_or_all) do
586 | messages =
587 | for {name, stubs_and_expecs} <-
588 | Req.Test.Ownership.get_owned(@ownership, owner_pid, _default = %{}, 5000),
589 | name == mock_or_all or mock_or_all == :all,
590 | pending_count = stubs_and_expecs |> Map.get(:expectations, []) |> length(),
591 | pending_count > 0 do
592 | " * expected #{inspect(name)} to be still used #{pending_count} more times"
593 | end
594 |
595 | if messages != [] do
596 | raise "error while verifying Req.Test expectations for #{inspect(owner_pid)}:\n\n" <>
597 | Enum.join(messages, "\n")
598 | end
599 |
600 | :ok
601 | end
602 |
603 | ## Helpers
604 |
605 | defp callers do
606 | [self() | Process.get(:"$callers") || []]
607 | end
608 |
609 | ## Plug callbacks
610 |
611 | if Code.ensure_loaded?(Plug) do
612 | @behaviour Plug
613 | end
614 |
615 | @doc false
616 | def init(name) do
617 | name
618 | end
619 |
  # Plug callback: resolves the plug registered under `name` (via stub/2 or
  # expect/3) and invokes it, supporting all four plug shapes accepted by
  # the is_plug/1 guard.
  @doc false
  def call(conn, name) do
    case __fetch_plug__(name) do
      fun when is_function(fun, 1) ->
        fun.(conn)

      # 2-arity function plugs receive an empty options list.
      fun when is_function(fun, 2) ->
        fun.(conn, [])

      module when is_atom(module) ->
        module.call(conn, module.init([]))

      {module, options} when is_atom(module) ->
        module.call(conn, module.init(options))

      other ->
        raise """
        expected plug to be one of:

          * fun(conn)
          * fun(conn, options)
          * module
          * {module, options}

        got: #{inspect(other)}\
        """
    end
  end
648 | end
649 |
--------------------------------------------------------------------------------
/lib/req/test/ownership.ex:
--------------------------------------------------------------------------------
1 | # Vendored from nimble_ownership v1.0.1, replacing NimbleOwnership.Error with
2 | # Req.Test.OwnershipError.
3 | #
4 | # Check changes with:
5 | #
6 | # git diff --no-index lib/req/test/ownership.ex ../nimble_ownership/lib/nimble_ownership.ex
defmodule Req.Test.Ownership do
  @moduledoc false

  # GenServer-style timeout: a positive integer (milliseconds) or :infinity.
  defguardp is_timeout(val) when (is_integer(val) and val > 0) or val == :infinity

  use GenServer

  alias Req.Test.OwnershipError, as: Error

  @typedoc "Ownership server."
  @type server() :: GenServer.server()

  @typedoc "Arbitrary key."
  @type key() :: term()

  @typedoc "Arbitrary metadata associated with an owned `t:key/0`."
  @type metadata() :: term()

  # Options forwarded verbatim to GenServer.start_link/3; anything else is rejected.
  @genserver_opts [
    :name,
    :timeout,
    :debug,
    :spawn_opt,
    :hibernate_after
  ]

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(options \\ []) when is_list(options) do
    {genserver_opts, other_opts} = Keyword.split(options, @genserver_opts)

    if other_opts != [] do
      raise ArgumentError, "unknown options: #{inspect(Keyword.keys(other_opts))}"
    end

    GenServer.start_link(__MODULE__, [], genserver_opts)
  end

  # Allows `pid_to_allow` (a pid, or a 0-arity fun lazily resolved to one or
  # more pids) to access `key`, provided `pid_with_access` owns the key or is
  # itself allowed.
  @spec allow(server(), pid(), pid() | (-> resolved_pid), key()) ::
          :ok | {:error, Error.t()}
        when resolved_pid: pid() | [pid()]
  def allow(ownership_server, pid_with_access, pid_to_allow, key, timeout \\ 5000)
      when is_pid(pid_with_access) and (is_pid(pid_to_allow) or is_function(pid_to_allow, 0)) and
             is_timeout(timeout) do
    GenServer.call(ownership_server, {:allow, pid_with_access, pid_to_allow, key}, timeout)
  end

  # Reads and updates the metadata that `owner_pid` holds under `key` via
  # `fun`, atomically inside the server. A callback that returns an invalid
  # shape is surfaced by re-raising in the *caller* (the :__raise__ tuple).
  @spec get_and_update(server(), pid(), key(), fun, timeout()) ::
          {:ok, get_value} | {:error, Error.t()}
        when fun: (nil | metadata() -> {get_value, updated_metadata :: metadata()}),
             get_value: term()
  def get_and_update(ownership_server, owner_pid, key, fun, timeout \\ 5000)
      when is_pid(owner_pid) and is_function(fun, 1) and is_timeout(timeout) do
    case GenServer.call(ownership_server, {:get_and_update, owner_pid, key, fun}, timeout) do
      {:ok, get_value} -> {:ok, get_value}
      {:error, %Error{} = error} -> {:error, error}
      {:__raise__, error} when is_exception(error) -> raise error
    end
  end

  # Finds the owner of `key` visible to any pid in `callers` (checked in order).
  @spec fetch_owner(server(), [pid(), ...], key(), timeout()) ::
          {:ok, owner :: pid()}
          | {:shared_owner, shared_owner :: pid()}
          | :error
  def fetch_owner(ownership_server, [_ | _] = callers, key, timeout \\ 5000)
      when is_timeout(timeout) do
    GenServer.call(ownership_server, {:fetch_owner, callers, key}, timeout)
  end

  # Returns the %{key => metadata} map owned by `owner_pid`, or `default`.
  @spec get_owned(server(), pid(), default, timeout()) :: %{key() => metadata()} | default
        when default: term()
  def get_owned(ownership_server, owner_pid, default \\ nil, timeout \\ 5000)
      when is_pid(owner_pid) and is_timeout(timeout) do
    GenServer.call(ownership_server, {:get_owned, owner_pid, default}, timeout)
  end

  @spec set_mode_to_private(server()) :: :ok
  def set_mode_to_private(ownership_server) do
    GenServer.call(ownership_server, {:set_mode, :private})
  end

  @spec set_mode_to_shared(server(), pid()) :: :ok
  def set_mode_to_shared(ownership_server, shared_owner) when is_pid(shared_owner) do
    GenServer.call(ownership_server, {:set_mode, {:shared, shared_owner}})
  end

  # Opt `owner_pid` out of automatic cleanup on exit; pair with cleanup_owner/2.
  @spec set_owner_to_manual_cleanup(server(), pid()) :: :ok
  def set_owner_to_manual_cleanup(ownership_server, owner_pid) do
    GenServer.call(ownership_server, {:set_owner_to_manual_cleanup, owner_pid})
  end

  @spec cleanup_owner(server(), pid()) :: :ok
  def cleanup_owner(ownership_server, owner_pid) when is_pid(owner_pid) do
    GenServer.call(ownership_server, {:cleanup_owner, owner_pid})
  end

  ## State

  defstruct [
    # The mode can be either :private, or {:shared, shared_owner_pid}.
    mode: :private,

    # This is a map of %{owner_pid => %{key => metadata}}. Its purpose is to track the metadata
    # under each key that a owner owns.
    owners: %{},

    # This tracks what to do when each owner goes down. It's a map of
    # %{owner_pid => :auto | :manual}.
    owner_cleanup: %{},

    # This is a map of %{allowed_pid => %{key => owner_pid}}. Its purpose is to track the keys
    # that a PID is allowed to access, alongside which the owner of those keys is.
    allowances: %{},

    # This is used to track which PIDs we're monitoring, to avoid double-monitoring.
    monitored_pids: MapSet.new()
  ]

  ## Callbacks

  @impl true
  def init([]) do
    {:ok, %__MODULE__{}}
  end

  @impl true
  def handle_call(call, from, state)

  # allow/4 is not meaningful in shared mode: everyone already reads through
  # the shared owner.
  def handle_call(
        {:allow, _pid_with_access, _pid_to_allow, key},
        _from,
        %__MODULE__{mode: {:shared, _shared_owner}} = state
      ) do
    error = %Error{key: key, reason: :cant_allow_in_shared_mode}
    {:reply, {:error, error}, state}
  end

  # NOTE: throw({:reply, ...}) is used as an early return throughout these
  # callbacks — :gen_server catches thrown values and treats them as the
  # callback's return value.
  def handle_call(
        {:allow, pid_with_access, pid_to_allow, key},
        _from,
        %__MODULE__{mode: :private} = state
      ) do
    if state.owners[pid_to_allow][key] do
      error = %Error{key: key, reason: :already_an_owner}
      throw({:reply, {:error, error}, state})
    end

    # Resolve the actual owner behind pid_with_access: it is either allowed
    # (transitively) or the owner itself; otherwise the allowance is refused.
    owner_pid =
      cond do
        owner_pid = state.allowances[pid_with_access][key] ->
          owner_pid

        _meta = state.owners[pid_with_access][key] ->
          pid_with_access

        true ->
          throw({:reply, {:error, %Error{key: key, reason: :not_allowed}}, state})
      end

    case state.allowances[pid_to_allow][key] do
      # There's already another owner PID that is allowing "pid_to_allow" to use "key".
      other_owner_pid when is_pid(other_owner_pid) and other_owner_pid != owner_pid ->
        error = %Error{key: key, reason: {:already_allowed, other_owner_pid}}
        {:reply, {:error, error}, state}

      # "pid_to_allow" is already allowed access to "key" through the same "owner_pid",
      # so this is a no-op.
      ^owner_pid ->
        {:reply, :ok, state}

      nil ->
        state =
          state
          |> maybe_monitor_pid(pid_with_access)
          |> put_in([Access.key!(:allowances), Access.key(pid_to_allow, %{}), key], owner_pid)

        {:reply, :ok, state}
    end
  end

  def handle_call({:get_and_update, owner_pid, key, fun}, _from, %__MODULE__{} = state) do
    # In shared mode only the shared owner may update keys.
    case state.mode do
      {:shared, shared_owner_pid} when shared_owner_pid != owner_pid ->
        error = %Error{key: key, reason: {:not_shared_owner, shared_owner_pid}}
        throw({:reply, {:error, error}, state})

      _ ->
        :ok
    end

    state = resolve_lazy_calls_for_key(state, key)

    # A pid that merely has an allowance for `key` cannot also own it.
    if other_owner = state.allowances[owner_pid][key] do
      throw({:reply, {:error, %Error{key: key, reason: {:already_allowed, other_owner}}}, state})
    end

    case fun.(_meta_or_nil = state.owners[owner_pid][key]) do
      {get_value, new_meta} ->
        state = put_in(state, [Access.key!(:owners), Access.key(owner_pid, %{}), key], new_meta)

        # We should also monitor the new owner, if it hasn't already been monitored. That
        # can happen if that owner is already the owner of another key. We ALWAYS monitor,
        # so if owner_pid is already an owner we're already monitoring it.
        state =
          if not Map.has_key?(state.owner_cleanup, owner_pid) do
            _ref = Process.monitor(owner_pid)
            put_in(state.owner_cleanup[owner_pid], :auto)
          else
            state
          end

        {:reply, {:ok, get_value}, state}

      other ->
        # Invalid callback return: raise in the caller, not in this server.
        message = """
        invalid return value from callback function. Expected nil or a tuple of the form \
        {get_value, update_value} (see the function's @spec), instead got: #{inspect(other)}\
        """

        {:reply, {:__raise__, %ArgumentError{message: message}}, state}
    end
  end

  # In shared mode every lookup resolves to the single shared owner.
  def handle_call(
        {:fetch_owner, _callers, _key},
        _from,
        %__MODULE__{mode: {:shared, shared_owner_pid}} = state
      ) do
    {:reply, {:shared_owner, shared_owner_pid}, state}
  end

  def handle_call({:fetch_owner, callers, key}, _from, %__MODULE__{mode: :private} = state) do
    # Try a direct lookup first; on a miss, resolve any lazy (fun-keyed)
    # allowances for this key and retry once.
    {owner, state} =
      case fetch_owner_once(state, callers, key) do
        nil ->
          state = resolve_lazy_calls_for_key(state, key)
          {fetch_owner_once(state, callers, key), state}

        owner ->
          {owner, state}
      end

    if is_nil(owner) do
      {:reply, :error, state}
    else
      {:reply, {:ok, owner}, state}
    end
  end

  def handle_call({:get_owned, owner_pid, default}, _from, %__MODULE__{} = state) do
    {:reply, state.owners[owner_pid] || default, state}
  end

  def handle_call({:set_mode, {:shared, shared_owner_pid}}, _from, %__MODULE__{} = state) do
    # Monitor the shared owner so the server can fall back to private mode
    # when it exits (see the first handle_info clause).
    state = maybe_monitor_pid(state, shared_owner_pid)
    state = %{state | mode: {:shared, shared_owner_pid}}
    {:reply, :ok, state}
  end

  def handle_call({:set_mode, :private}, _from, %__MODULE__{} = state) do
    {:reply, :ok, %{state | mode: :private}}
  end

  def handle_call({:set_owner_to_manual_cleanup, owner_pid}, _from, %__MODULE__{} = state) do
    {:reply, :ok, put_in(state.owner_cleanup[owner_pid], :manual)}
  end

  def handle_call({:cleanup_owner, pid}, _from, %__MODULE__{} = state) do
    {:reply, :ok, pop_owner_and_clean_up_allowances(state, pid)}
  end

  @impl true
  def handle_info(msg, state)

  # The global owner went down, so we go back to private mode.
  def handle_info({:DOWN, _, _, down_pid, _}, %__MODULE__{mode: {:shared, down_pid}} = state) do
    {:noreply, %{state | mode: :private}}
  end

  # An owner went down, so we need to clean up all of its allowances as well as all its keys.
  def handle_info({:DOWN, _ref, _, down_pid, _}, state)
      when is_map_key(state.owners, down_pid) do
    case state.owner_cleanup[down_pid] || :auto do
      # :manual owners (see set_owner_to_manual_cleanup/2) are left for an
      # explicit cleanup_owner/2 call, e.g. after on-exit verification.
      :manual ->
        {:noreply, state}

      :auto ->
        state = pop_owner_and_clean_up_allowances(state, down_pid)
        {:noreply, state}
    end
  end

  # A PID that we were monitoring went down. Let's just clean up all its allowances.
  def handle_info({:DOWN, _, _, down_pid, _}, state) do
    {_keys_and_values, state} = pop_in(state.allowances[down_pid])
    state = update_in(state.monitored_pids, &MapSet.delete(&1, down_pid))
    {:noreply, state}
  end

  ## Helpers

  # Drops target_pid's owned keys and cleanup entry, and removes every
  # allowance (for any pid) that pointed at target_pid as the owner.
  defp pop_owner_and_clean_up_allowances(state, target_pid) do
    {_, state} = pop_in(state.owners[target_pid])
    {_, state} = pop_in(state.owner_cleanup[target_pid])

    allowances =
      Enum.reduce(state.allowances, state.allowances, fn {pid, allowances}, acc ->
        new_allowances =
          for {key, owner_pid} <- allowances,
              owner_pid != target_pid,
              into: %{},
              do: {key, owner_pid}

        Map.put(acc, pid, new_allowances)
      end)

    %{state | allowances: allowances}
  end

  defp maybe_monitor_pid(state, pid) do
    if pid in state.monitored_pids do
      state
    else
      Process.monitor(pid)
      update_in(state.monitored_pids, &MapSet.put(&1, pid))
    end
  end

  # First caller that either owns `key` or holds an allowance for it wins.
  defp fetch_owner_once(state, callers, key) do
    Enum.find_value(callers, fn caller ->
      case state do
        %{owners: %{^caller => %{^key => _meta}}} -> caller
        %{allowances: %{^caller => %{^key => owner_pid}}} -> owner_pid
        _ -> nil
      end
    end)
  end

  # Allowances may be keyed by a 0-arity fun instead of a pid (lazy
  # resolution): run each such fun and re-key its allowance map onto the
  # pid(s) it returns.
  defp resolve_lazy_calls_for_key(state, key) do
    updated_allowances =
      Enum.reduce(state.allowances, state.allowances, fn
        {fun, value}, allowances when is_function(fun, 0) and is_map_key(value, key) ->
          result =
            fun.()
            |> List.wrap()
            |> Enum.group_by(&is_pid/1)

          allowances =
            result
            |> Map.get(true, [])
            |> Enum.reduce(allowances, fn pid, allowances ->
              Map.update(allowances, pid, value, &Map.merge(&1, value))
            end)

          # NOTE(review): this checks `allowances` for the key `false`, which can
          # never be present (its keys are pids or funs), so the lazy fun entry is
          # never deleted and will be re-run on every resolution. Upstream
          # nimble_ownership checks the grouped `result` map (keeping the fun only
          # when some returned values were not pids) — verify against upstream.
          if Map.has_key?(allowances, false), do: Map.delete(allowances, fun), else: allowances

        _, allowances ->
          allowances
      end)

    %{state | allowances: updated_allowances}
  end
end
369 |
--------------------------------------------------------------------------------
/lib/req/test/ownership_error.ex:
--------------------------------------------------------------------------------
1 | # Vendored from nimble_ownership. See Req.Test.Ownership.
defmodule Req.Test.OwnershipError do
  # Exception raised/returned by Req.Test.Ownership; `key` is the stub name
  # involved and `reason` is a tag describing the ownership violation.
  defexception [:reason, :key]

  @impl true
  def message(%__MODULE__{key: key, reason: reason}) do
    case reason do
      {:already_allowed, other_owner_pid} ->
        "this PID is already allowed to access key #{inspect(key)} via other owner PID #{inspect(other_owner_pid)}"

      :not_allowed ->
        "this PID is not allowed to access key #{inspect(key)}"

      :already_an_owner ->
        "this PID is already an owner of key #{inspect(key)}"

      {:not_shared_owner, pid} ->
        "#{inspect(pid)} is not the shared owner, so it cannot update keys"

      :cant_allow_in_shared_mode ->
        "cannot allow PIDs in shared mode"
    end
  end
end
32 |
--------------------------------------------------------------------------------
/lib/req/too_many_redirects_error.ex:
--------------------------------------------------------------------------------
defmodule Req.TooManyRedirectsError do
  @moduledoc """
  Represents an error when too many redirects occurred, returned by `Req.Steps.redirect/1`.
  """

  defexception [:max_redirects]

  @impl true
  def message(%{max_redirects: max_redirects}),
    do: "too many redirects (#{max_redirects})"
end
13 |
--------------------------------------------------------------------------------
/lib/req/transport_error.ex:
--------------------------------------------------------------------------------
defmodule Req.TransportError do
  @moduledoc """
  Represents an error with the transport used by an HTTP connection.

  This is a standardised exception that all Req adapters should use for transport-layer-related
  errors.

  This exception is based on `Mint.TransportError`.
  """

  defexception [:reason]

  @impl true
  def message(%__MODULE__{reason: reason}) do
    # Message formatting is delegated to Mint, so `reason` must be a term
    # Mint.TransportError knows how to describe (e.g. :timeout, :closed,
    # or an :ssl/:inet error reason).
    Mint.TransportError.message(%Mint.TransportError{reason: reason})
  end
end
18 |
--------------------------------------------------------------------------------
/lib/req/utils.ex:
--------------------------------------------------------------------------------
1 | defmodule Req.Utils do
2 | @moduledoc false
3 |
  # Compile-time helper: rewrites an interpolated-string AST (`"a#{b}c"`
  # compiles to a `<<>>` node) into a plain list of its segments, producing
  # iodata instead of building an intermediate binary at runtime.
  defmacrop iodata({:<<>>, _, parts}) do
    Enum.map(parts, &to_iodata/1)
  end

  # Literal binary segments pass through unchanged.
  defp to_iodata(binary) when is_binary(binary) do
    binary
  end

  # An interpolated segment compiles to `Kernel.to_string(expr) :: binary`;
  # return the raw `expr` AST, dropping the to_string call — so interpolated
  # expressions are presumably expected to already evaluate to iodata-safe
  # values (binaries) at the call sites in this module. TODO(review): confirm
  # against the callers below.
  defp to_iodata(
         {:"::", _, [{{:., _, [Kernel, :to_string]}, _, [interpolation]}, {:binary, _, nil}]}
       ) do
    interpolation
  end
17 |
18 | @doc """
19 | Create AWS Signature v4.
20 |
21 | https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
22 | """
23 | def aws_sigv4_headers(options) do
24 | {access_key_id, options} = Keyword.pop!(options, :access_key_id)
25 | {secret_access_key, options} = Keyword.pop!(options, :secret_access_key)
26 | {security_token, options} = Keyword.pop(options, :token)
27 | {region, options} = Keyword.pop!(options, :region)
28 | {service, options} = Keyword.pop!(options, :service)
29 | {datetime, options} = Keyword.pop!(options, :datetime)
30 | {method, options} = Keyword.pop!(options, :method)
31 | {url, options} = Keyword.pop!(options, :url)
32 | {headers, options} = Keyword.pop!(options, :headers)
33 | {body, options} = Keyword.pop!(options, :body)
34 | Keyword.validate!(options, [:body_digest])
35 |
36 | datetime = DateTime.truncate(datetime, :second)
37 | datetime_string = DateTime.to_iso8601(datetime, :basic)
38 | date_string = Date.to_iso8601(datetime, :basic)
39 | url = normalize_url(url)
40 | body_digest = options[:body_digest] || hex(sha256(body))
41 | service = to_string(service)
42 |
43 | method = method |> Atom.to_string() |> String.upcase()
44 |
45 | headers = canonical_host_header(headers, url)
46 |
47 | aws_headers = [
48 | {"x-amz-content-sha256", body_digest},
49 | {"x-amz-date", datetime_string}
50 | ]
51 |
52 | aws_headers =
53 | if security_token do
54 | aws_headers ++ [{"x-amz-security-token", security_token}]
55 | else
56 | aws_headers
57 | end
58 |
59 | canonical_headers = headers ++ aws_headers
60 |
61 | ## canonical_headers needs to be sorted for canonical_request construction
62 | canonical_headers = Enum.sort(canonical_headers)
63 |
64 | signed_headers =
65 | Enum.map_intersperse(
66 | Enum.sort(canonical_headers),
67 | ";",
68 | &String.downcase(elem(&1, 0), :ascii)
69 | )
70 |
71 | canonical_headers =
72 | Enum.map_intersperse(canonical_headers, "\n", fn {name, value} -> [name, ":", value] end)
73 |
74 | path = URI.encode(url.path || "/", &(&1 == ?/ or URI.char_unreserved?(&1)))
75 |
76 | canonical_query = canonical_query(url.query)
77 |
78 | canonical_request = """
79 | #{method}
80 | #{path}
81 | #{canonical_query}
82 | #{canonical_headers}
83 |
84 | #{signed_headers}
85 | #{body_digest}\
86 | """
87 |
88 | string_to_sign =
89 | iodata("""
90 | AWS4-HMAC-SHA256
91 | #{datetime_string}
92 | #{date_string}/#{region}/#{service}/aws4_request
93 | #{hex(sha256(canonical_request))}\
94 | """)
95 |
96 | signature =
97 | aws_sigv4(
98 | string_to_sign,
99 | date_string,
100 | region,
101 | service,
102 | secret_access_key
103 | )
104 |
105 | credential = "#{access_key_id}/#{date_string}/#{region}/#{service}/aws4_request"
106 |
107 | authorization =
108 | "AWS4-HMAC-SHA256 Credential=#{credential},SignedHeaders=#{signed_headers},Signature=#{signature}"
109 |
110 | [{"authorization", authorization}] ++ aws_headers ++ headers
111 | end
112 |
113 | defp canonical_query(query) when query in [nil, ""] do
114 | query
115 | end
116 |
117 | defp canonical_query(query) do
118 | for item <- String.split(query, "&", trim: true) do
119 | case String.split(item, "=") do
120 | [name, value] -> [name, "=", value]
121 | [name] -> [name, "="]
122 | end
123 | end
124 | |> Enum.sort()
125 | |> Enum.intersperse("&")
126 | end
127 |
128 | @doc """
129 | Create AWS Signature v4 URL.
130 |
131 | https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
132 | """
133 | def aws_sigv4_url(options) do
134 | {access_key_id, options} = Keyword.pop!(options, :access_key_id)
135 | {secret_access_key, options} = Keyword.pop!(options, :secret_access_key)
136 | {region, options} = Keyword.pop!(options, :region)
137 | {service, options} = Keyword.pop!(options, :service)
138 | {datetime, options} = Keyword.pop!(options, :datetime)
139 | {method, options} = Keyword.pop!(options, :method)
140 | {url, options} = Keyword.pop!(options, :url)
141 | {expires, options} = Keyword.pop(options, :expires, 86400)
142 | {headers, options} = Keyword.pop(options, :headers, [])
143 | {query, options} = Keyword.pop(options, :query, [])
144 | [] = options
145 |
146 | datetime = DateTime.truncate(datetime, :second)
147 | datetime_string = DateTime.to_iso8601(datetime, :basic)
148 | date_string = Date.to_iso8601(datetime, :basic)
149 | url = normalize_url(url)
150 | service = to_string(service)
151 |
152 | canonical_headers =
153 | headers
154 | |> canonical_host_header(url)
155 | |> format_canonical_headers()
156 |
157 | signed_headers = Enum.map_join(canonical_headers, ";", &elem(&1, 0))
158 |
159 | canonical_query_string =
160 | format_canonical_query_params(
161 | [
162 | {"X-Amz-Algorithm", "AWS4-HMAC-SHA256"},
163 | {"X-Amz-Credential",
164 | "#{access_key_id}/#{date_string}/#{region}/#{service}/aws4_request"},
165 | {"X-Amz-Date", datetime_string},
166 | {"X-Amz-Expires", expires},
167 | {"X-Amz-SignedHeaders", signed_headers}
168 | ] ++ query
169 | )
170 |
171 | path = URI.encode(url.path || "/", &(&1 == ?/ or URI.char_unreserved?(&1)))
172 |
173 | true = url.query in [nil, ""]
174 |
175 | method = method |> Atom.to_string() |> String.upcase()
176 |
177 | canonical_headers =
178 | Enum.map_intersperse(canonical_headers, "\n", fn {name, value} -> [name, ":", value] end)
179 |
180 | canonical_request = """
181 | #{method}
182 | #{path}
183 | #{canonical_query_string}
184 | #{canonical_headers}
185 |
186 | #{signed_headers}
187 | UNSIGNED-PAYLOAD\
188 | """
189 |
190 | string_to_sign =
191 | iodata("""
192 | AWS4-HMAC-SHA256
193 | #{datetime_string}
194 | #{date_string}/#{region}/#{service}/aws4_request
195 | #{hex(sha256(canonical_request))}\
196 | """)
197 |
198 | signature =
199 | aws_sigv4(
200 | string_to_sign,
201 | date_string,
202 | region,
203 | service,
204 | secret_access_key
205 | )
206 |
207 | %{url | path: path, query: canonical_query_string <> "&X-Amz-Signature=#{signature}"}
208 | end
209 |
210 | # Try decoding the path in case it was encoded earlier to prevent double encoding,
211 | # as the path is encoded later in the corresponding function.
212 | defp normalize_url(url) do
213 | url = URI.parse(url)
214 |
215 | case url.path do
216 | nil -> url
217 | path -> %{url | path: URI.decode(path)}
218 | end
219 | end
220 |
221 | defp canonical_host_header(headers, %URI{} = url) do
222 | {_host_headers, headers} = Enum.split_with(headers, &match?({"host", _value}, &1))
223 |
224 | host_value =
225 | if is_nil(url.port) or URI.default_port(url.scheme) == url.port do
226 | url.host
227 | else
228 | "#{url.host}:#{url.port}"
229 | end
230 |
231 | [{"host", host_value} | headers]
232 | end
233 |
234 | # Headers must be sorted alphabetically by name
235 | # Header names must be lower case
236 | # Header values must be trimmed
237 | # See https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
238 | defp format_canonical_headers(headers) do
239 | headers
240 | |> Enum.map(&format_canonical_header/1)
241 | |> Enum.sort(fn {name_1, _}, {name_2, _} -> name_1 < name_2 end)
242 | end
243 |
244 | defp format_canonical_header({name, value}) do
245 | name =
246 | name
247 | |> to_string()
248 | |> String.downcase(:ascii)
249 |
250 | value =
251 | value
252 | |> to_string()
253 | |> String.trim()
254 |
255 | {name, value}
256 | end
257 |
258 | # Query params must be sorted alphabetically by name
259 | # Query param name and values must be URI-encoded individually
260 | # Query params must be sorted after encoding
261 | # See https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
262 | defp format_canonical_query_params(query_params) do
263 | query_params
264 | |> Enum.map(&format_canonical_query_param/1)
265 | |> Enum.sort(&canonical_query_param_sorter/2)
266 | |> Enum.map_join("&", fn {name, value} -> "#{name}=#{value}" end)
267 | end
268 |
269 | # Spaces must be encoded as %20, not as "+".
270 | defp format_canonical_query_param({name, value}) do
271 | name =
272 | name
273 | |> to_string()
274 | |> URI.encode(&URI.char_unreserved?/1)
275 |
276 | value =
277 | value
278 | |> to_string()
279 | |> URI.encode(&URI.char_unreserved?/1)
280 |
281 | {name, value}
282 | end
283 |
284 | defp canonical_query_param_sorter({name, value_1}, {name, value_2}), do: value_1 < value_2
285 | defp canonical_query_param_sorter({name_1, _}, {name_2, _}), do: name_1 < name_2
286 |
287 | def aws_sigv4(
288 | string_to_sign,
289 | date_string,
290 | region,
291 | service,
292 | secret_access_key
293 | ) do
294 | signature =
295 | ["AWS4", secret_access_key]
296 | |> hmac(date_string)
297 | |> hmac(region)
298 | |> hmac(service)
299 | |> hmac("aws4_request")
300 | |> hmac(string_to_sign)
301 | |> hex()
302 |
303 | signature
304 | end
305 |
306 | defp hex(data) do
307 | Base.encode16(data, case: :lower)
308 | end
309 |
310 | defp sha256(data) do
311 | :crypto.hash(:sha256, data)
312 | end
313 |
314 | defp hmac(key, data) do
315 | :crypto.mac(:hmac, :sha256, key, data)
316 | end
317 |
318 | @doc """
319 | Formats a datetime as "HTTP Date".
320 |
321 | ## Examples
322 |
323 | iex> Req.Utils.format_http_date(~U[2024-01-01 09:00:00Z])
324 | "Mon, 01 Jan 2024 09:00:00 GMT"
325 | """
326 | def format_http_date(datetime) do
327 | Calendar.strftime(datetime, "%a, %d %b %Y %H:%M:%S GMT")
328 | end
329 |
330 | @doc """
331 | Parses "HTTP Date" as datetime.
332 |
333 | ## Examples
334 |
335 | iex> Req.Utils.parse_http_date("Mon, 01 Jan 2024 09:00:00 GMT")
336 | {:ok, ~U[2024-01-01 09:00:00Z]}
337 | """
338 | def parse_http_date(<<
339 | day_name::binary-size(3),
340 | ", ",
341 | day::binary-size(2),
342 | " ",
343 | month_name::binary-size(3),
344 | " ",
345 | year::binary-size(4),
346 | " ",
347 | time::binary-size(8),
348 | " GMT"
349 | >>) do
350 | with {:ok, day_of_week} <- parse_day_name(day_name),
351 | {day, ""} <- Integer.parse(day),
352 | {:ok, month} <- parse_month_name(month_name),
353 | {year, ""} <- Integer.parse(year),
354 | {:ok, time} <- Time.from_iso8601(time),
355 | {:ok, date} <- Date.new(year, month, day),
356 | true <- day_of_week == Date.day_of_week(date) do
357 | DateTime.new(date, time)
358 | else
359 | {:error, _} = e ->
360 | e
361 |
362 | _ ->
363 | {:error, :invalid_format}
364 | end
365 | end
366 |
367 | def parse_http_date(binary) when is_binary(binary) do
368 | {:error, :invalid_format}
369 | end
370 |
371 | defp parse_month_name("Jan"), do: {:ok, 1}
372 | defp parse_month_name("Feb"), do: {:ok, 2}
373 | defp parse_month_name("Mar"), do: {:ok, 3}
374 | defp parse_month_name("Apr"), do: {:ok, 4}
375 | defp parse_month_name("May"), do: {:ok, 5}
376 | defp parse_month_name("Jun"), do: {:ok, 6}
377 | defp parse_month_name("Jul"), do: {:ok, 7}
378 | defp parse_month_name("Aug"), do: {:ok, 8}
379 | defp parse_month_name("Sep"), do: {:ok, 9}
380 | defp parse_month_name("Oct"), do: {:ok, 10}
381 | defp parse_month_name("Nov"), do: {:ok, 11}
382 | defp parse_month_name("Dec"), do: {:ok, 12}
383 | defp parse_month_name(_), do: :error
384 |
385 | defp parse_day_name("Mon"), do: {:ok, 1}
386 | defp parse_day_name("Tue"), do: {:ok, 2}
387 | defp parse_day_name("Wed"), do: {:ok, 3}
388 | defp parse_day_name("Thu"), do: {:ok, 4}
389 | defp parse_day_name("Fri"), do: {:ok, 5}
390 | defp parse_day_name("Sat"), do: {:ok, 6}
391 | defp parse_day_name("Sun"), do: {:ok, 7}
392 | defp parse_day_name(_), do: :error
393 |
394 | @doc """
395 | Parses "HTTP Date" as datetime or raises an error.
396 |
397 | ## Examples
398 |
399 | iex> Req.Utils.parse_http_date!("Mon, 01 Jan 2024 09:00:00 GMT")
400 | ~U[2024-01-01 09:00:00Z]
401 |
402 | iex> Req.Utils.parse_http_date!("Mon")
403 | ** (ArgumentError) cannot parse "Mon" as HTTP date, reason: :invalid_format
404 | """
405 | def parse_http_date!(binary) do
406 | case parse_http_date(binary) do
407 | {:ok, datetime} ->
408 | datetime
409 |
410 | {:error, reason} ->
411 | raise ArgumentError,
412 | "cannot parse #{inspect(binary)} as HTTP date, reason: #{inspect(reason)}"
413 | end
414 | end
415 |
416 | @doc """
417 | Returns a stream where each element is gzipped.
418 |
419 | ## Examples
420 |
421 | iex> gzipped = Req.Utils.stream_gzip(~w[foo bar baz]) |> Enum.to_list()
422 | iex> :zlib.gunzip(gzipped)
423 | "foobarbaz"
424 | """
425 | def stream_gzip(enumerable) do
426 | Stream.transform(
427 | enumerable,
428 | # start_fun
429 | fn ->
430 | z = :zlib.open()
431 | # copied from :zlib.gzip/1
432 | :ok = :zlib.deflateInit(z, :default, :deflated, 16 + 15, 8, :default)
433 | z
434 | end,
435 | # reducer
436 | fn chunk, z ->
437 | case :zlib.deflate(z, chunk) do
438 | # optimization: avoid emitting empty chunks
439 | [] -> {[], z}
440 | compressed -> {[compressed], z}
441 | end
442 | end,
443 | # last_fun
444 | fn z ->
445 | last = :zlib.deflate(z, [], :finish)
446 | :ok = :zlib.deflateEnd(z)
447 | {[last], z}
448 | end,
449 | # after_fun
450 | fn z -> :ok = :zlib.close(z) end
451 | )
452 | end
453 |
454 | defmodule CollectWithHash do
455 | @moduledoc false
456 |
457 | defstruct [:collectable, :type]
458 |
459 | defimpl Collectable do
460 | def into(%{collectable: collectable, type: type}) do
461 | {acc, collector} = Collectable.into(collectable)
462 |
463 | new_collector = fn
464 | {acc, hash}, {:cont, element} ->
465 | hash = :crypto.hash_update(hash, element)
466 | {collector.(acc, {:cont, element}), hash}
467 |
468 | {acc, hash}, :done ->
469 | hash = :crypto.hash_final(hash)
470 | {collector.(acc, :done), hash}
471 |
472 | {acc, hash}, :halt ->
473 | {collector.(acc, :halt), hash}
474 | end
475 |
476 | hash = hash_init(type)
477 | {{acc, hash}, new_collector}
478 | end
479 |
480 | defp hash_init(:sha1), do: :crypto.hash_init(:sha)
481 | defp hash_init(type), do: :crypto.hash_init(type)
482 | end
483 | end
484 |
485 | @doc """
486 | Returns a collectable with hash.
487 |
488 | ## Examples
489 |
490 | iex> collectable = Req.Utils.collect_with_hash([], :md5)
491 | iex> Enum.into(Stream.duplicate("foo", 2), collectable)
492 | {~w[foo foo], :erlang.md5("foofoo")}
493 | """
494 | def collect_with_hash(collectable, type) do
495 | %CollectWithHash{collectable: collectable, type: type}
496 | end
497 |
498 | @crlf "\r\n"
499 |
500 | @doc """
501 | Encodes fields into "multipart/form-data" format.
502 | """
503 | def encode_form_multipart(fields, options \\ []) do
504 | options = Keyword.validate!(options, [:boundary])
505 |
506 | boundary =
507 | options[:boundary] ||
508 | Base.encode16(:crypto.strong_rand_bytes(16), padding: false, case: :lower)
509 |
510 | footer = [["--", boundary, "--", @crlf]]
511 |
512 | {body, size} =
513 | fields
514 | |> Enum.reduce({[], 0}, &add_form_parts(&2, encode_form_part(&1, boundary)))
515 | |> add_form_parts({footer, IO.iodata_length(footer)})
516 |
517 | %{
518 | size: size,
519 | content_type: "multipart/form-data; boundary=#{boundary}",
520 | body: body
521 | }
522 | end
523 |
524 | defp add_sizes(_, nil), do: nil
525 | defp add_sizes(nil, _), do: nil
526 | defp add_sizes(size1, size2), do: size1 + size2
527 |
528 | defp add_form_parts({parts1, size1}, {parts2, size2})
529 | when is_list(parts1) and is_list(parts2) do
530 | {[parts1, parts2], add_sizes(size1, size2)}
531 | end
532 |
533 | defp add_form_parts({parts1, size1}, {parts2, size2}) do
534 | {Stream.concat(parts1, parts2), add_sizes(size1, size2)}
535 | end
536 |
537 | defp encode_form_part({name, {value, options}}, boundary) do
538 | options = Keyword.validate!(options, [:filename, :content_type, :size])
539 |
540 | {parts, parts_size, options} =
541 | case value do
542 | integer when is_integer(integer) ->
543 | part = Integer.to_string(integer)
544 | {[part], byte_size(part), options}
545 |
546 | value when is_binary(value) or is_list(value) ->
547 | {[value], IO.iodata_length(value), options}
548 |
549 | stream = %File.Stream{} ->
550 | filename = Path.basename(stream.path)
551 |
552 | # TODO: Simplify when we require Elixir v1.15
553 | size =
554 | if not Map.has_key?(stream, :node) or stream.node == node() do
555 | File.stat!(stream.path).size
556 | else
557 | :erpc.call(stream.node, fn -> File.stat!(stream.path).size end)
558 | end
559 |
560 | options =
561 | options
562 | |> Keyword.put_new(:filename, filename)
563 | |> Keyword.put_new_lazy(:content_type, fn ->
564 | MIME.from_path(filename)
565 | end)
566 |
567 | {stream, size, options}
568 |
569 | enum ->
570 | size = Keyword.get(options, :size)
571 |
572 | {enum, size, options}
573 | end
574 |
575 | params =
576 | if filename = options[:filename] do
577 | ["; filename=\"", filename, "\""]
578 | else
579 | []
580 | end
581 |
582 | headers =
583 | if content_type = options[:content_type] do
584 | ["content-type: ", content_type, @crlf]
585 | else
586 | []
587 | end
588 |
589 | headers = ["content-disposition: form-data; name=\"#{name}\"", params, @crlf, headers]
590 | header = [["--", boundary, @crlf, headers, @crlf]]
591 |
592 | {header, IO.iodata_length(header)}
593 | |> add_form_parts({parts, parts_size})
594 | |> add_form_parts({[@crlf], 2})
595 | end
596 |
597 | defp encode_form_part({name, value}, boundary) do
598 | encode_form_part({name, {value, []}}, boundary)
599 | end
600 |
601 | @doc """
602 | Loads .netrc file.
603 |
604 | ## Examples
605 |
606 | iex> {:ok, pid} = StringIO.open(\"""
607 | ...> machine localhost
608 | ...> login foo
609 | ...> password bar
610 | ...> \""")
611 | iex> Req.Utils.load_netrc(pid)
612 | %{"localhost" => {"foo", "bar"}}
613 | """
614 | def load_netrc(path_or_device) do
615 | case read_netrc(path_or_device) do
616 | {:ok, ""} ->
617 | raise ".netrc file is empty"
618 |
619 | {:ok, contents} ->
620 | contents
621 | |> String.trim()
622 | |> String.split()
623 | |> parse_netrc()
624 |
625 | {:error, reason} ->
626 | raise "error reading .netrc file: #{:file.format_error(reason)}"
627 | end
628 | end
629 |
630 | defp read_netrc(path) when is_binary(path) do
631 | File.read(path)
632 | end
633 |
634 | defp read_netrc(pid) when is_pid(pid) do
635 | <<content::binary>> = IO.read(pid, :eof)
636 | {:ok, content}
637 | end
638 |
639 | defp parse_netrc(credentials), do: parse_netrc(credentials, %{})
640 |
641 | defp parse_netrc([], acc), do: acc
642 |
643 | defp parse_netrc([_, machine, _, login, _, password | tail], acc) do
644 | acc = Map.put(acc, String.trim(machine), {String.trim(login), String.trim(password)})
645 | parse_netrc(tail, acc)
646 | end
647 |
648 | defp parse_netrc(_, _), do: raise("error parsing .netrc file")
649 | end
650 |
--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
1 | defmodule Req.MixProject do
2 | use Mix.Project
3 |
4 | @version "0.5.10"
5 | @source_url "https://github.com/wojtekmach/req"
6 |
7 | def project do
8 | [
9 | app: :req,
10 | version: @version,
11 | elixir: "~> 1.14",
12 | start_permanent: Mix.env() == :prod,
13 | deps: deps(),
14 | package: package(),
15 | docs: docs(),
16 | aliases: [
17 | "test.all": ["test --include integration"]
18 | ],
19 | xref: [
20 | exclude: [
21 | NimbleCSV.RFC4180,
22 | Plug.Test,
23 | Plug.Conn,
24 | :brotli,
25 | :ezstd
26 | ]
27 | ]
28 | ]
29 | end
30 |
31 | def application do
32 | [
33 | mod: {Req.Application, []},
34 | extra_applications: [:logger]
35 | ]
36 | end
37 |
38 | def cli do
39 | [
40 | preferred_envs: [
41 | "test.all": :test,
42 | docs: :docs,
43 | "hex.publish": :docs
44 | ]
45 | ]
46 | end
47 |
48 | defp package do
49 | [
50 | description: "Req is a batteries-included HTTP client for Elixir.",
51 | licenses: ["Apache-2.0"],
52 | links: %{
53 | "GitHub" => @source_url,
54 | "Changelog" => "https://hexdocs.pm/req/changelog.html"
55 | }
56 | ]
57 | end
58 |
59 | defp deps do
60 | [
61 | {:finch, "~> 0.17", finch_opts()},
62 | {:mime, "~> 2.0.6 or ~> 2.1"},
63 | {:jason, "~> 1.0"},
64 | {:nimble_csv, "~> 1.0", optional: true},
65 | {:plug, "~> 1.0", [optional: true] ++ plug_opts()},
66 | {:brotli, "~> 0.3.1", optional: true},
67 | {:ezstd, "~> 1.0", optional: true},
68 | {:aws_signature, "~> 0.3.2", only: :test},
69 | {:bypass, "~> 2.1", only: :test},
70 | {:ex_doc, ">= 0.0.0", only: :docs, warn_if_outdated: true},
71 | {:bandit, "~> 1.0", only: :test},
72 | {:castore, "~> 1.0", only: :test}
73 | ]
74 | end
75 |
76 | defp finch_opts do
77 | cond do
78 | path = System.get_env("FINCH_PATH") ->
79 | [path: path]
80 |
81 | ref = System.get_env("FINCH_REF") ->
82 | [github: "sneako/finch", ref: ref]
83 |
84 | true ->
85 | []
86 | end
87 | end
88 |
89 | defp plug_opts do
90 | cond do
91 | path = System.get_env("PLUG_PATH") ->
92 | [path: path, override: true]
93 |
94 | ref = System.get_env("PLUG_REF") ->
95 | [github: "elixir-plug/plug", ref: ref, override: true]
96 |
97 | true ->
98 | []
99 | end
100 | end
101 |
102 | defp docs do
103 | [
104 | main: "readme",
105 | source_url: @source_url,
106 | source_ref: "v#{@version}",
107 | groups_for_docs: [
108 | Types: &(&1[:kind] == :type),
109 | Callbacks: &(&1[:kind] == :callback),
110 | "Request Steps": &(&1[:step] == :request),
111 | "Response Steps": &(&1[:step] == :response),
112 | "Error Steps": &(&1[:step] == :error),
113 | Functions: &(&1[:kind] == :function and &1[:type] not in [:request, :mock, :async]),
114 | "Functions (Making Requests)": &(&1[:type] == :request),
115 | "Functions (Async Response)": &(&1[:type] == :async),
116 | "Functions (Mocks & Stubs)": &(&1[:type] == :mock)
117 | ],
118 | extras: [
119 | "README.md",
120 | "CHANGELOG.md"
121 | ],
122 | skip_code_autolink_to: [
123 | "Req.Test.stub/1",
124 | "Req.Utils.aws_sigv4_url/1",
125 | "Req.update/2"
126 | ]
127 | ]
128 | end
129 |
130 | def legacy_headers_as_lists? do
131 | Application.get_env(:req, :legacy_headers_as_lists, false)
132 | end
133 | end
134 |
--------------------------------------------------------------------------------
/mix.lock:
--------------------------------------------------------------------------------
1 | %{
2 | "aws_signature": {:hex, :aws_signature, "0.3.2", "adf33bc4af00b2089b7708bf20e3246f09c639a905a619b3689f0a0a22c3ef8f", [:rebar3], [], "hexpm", "b0daf61feb4250a8ab0adea60db3e336af732ff71dd3fb22e45ae3dcbd071e44"},
3 | "bandit": {:hex, :bandit, "1.6.1", "9e01b93d72ddc21d8c576a704949e86ee6cde7d11270a1d3073787876527a48f", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "5a904bf010ea24b67979835e0507688e31ac873d4ffc8ed0e5413e8d77455031"},
4 | "brotli": {:hex, :brotli, "0.3.2", "59cf45a399098516f1d34f70d8e010e5c9bf326659d3ef34c7cc56793339002b", [:rebar3], [], "hexpm", "9ec3ef9c753f80d0c657b4905193c55e5198f169fa1d1c044d8601d4d931a2ad"},
5 | "bypass": {:hex, :bypass, "2.1.0", "909782781bf8e20ee86a9cabde36b259d44af8b9f38756173e8f5e2e1fabb9b1", [:mix], [{:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.0", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:ranch, "~> 1.3", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "d9b5df8fa5b7a6efa08384e9bbecfe4ce61c77d28a4282f79e02f1ef78d96b80"},
6 | "castore": {:hex, :castore, "1.0.10", "43bbeeac820f16c89f79721af1b3e092399b3a1ecc8df1a472738fd853574911", [:mix], [], "hexpm", "1b0b7ea14d889d9ea21202c43a4fa015eb913021cb535e8ed91946f4b77a8848"},
7 | "cowboy": {:hex, :cowboy, "2.12.0", "f276d521a1ff88b2b9b4c54d0e753da6c66dd7be6c9fca3d9418b561828a3731", [:make, :rebar3], [{:cowlib, "2.13.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "8a7abe6d183372ceb21caa2709bec928ab2b72e18a3911aa1771639bef82651e"},
8 | "cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
9 | "cowlib": {:hex, :cowlib, "2.13.0", "db8f7505d8332d98ef50a3ef34b34c1afddec7506e4ee4dd4a3a266285d282ca", [:make, :rebar3], [], "hexpm", "e1e1284dc3fc030a64b1ad0d8382ae7e99da46c3246b815318a4b848873800a4"},
10 | "earmark_parser": {:hex, :earmark_parser, "1.4.44", "f20830dd6b5c77afe2b063777ddbbff09f9759396500cdbe7523efd58d7a339c", [:mix], [], "hexpm", "4778ac752b4701a5599215f7030989c989ffdc4f6df457c5f36938cc2d2a2750"},
11 | "ex_doc": {:hex, :ex_doc, "0.38.1", "bae0a0bd5b5925b1caef4987e3470902d072d03347114ffe03a55dbe206dd4c2", [:mix], [{:earmark_parser, "~> 1.4.44", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "754636236d191b895e1e4de2ebb504c057fe1995fdfdd92e9d75c4b05633008b"},
12 | "ezstd": {:hex, :ezstd, "1.1.0", "d3b483d6acfadfb65dba4015371e6d54526dbf3d9ef0941b5add8bf5890731f4", [:rebar3], [], "hexpm", "28cfa0ed6cc3922095ad5ba0f23392a1664273358b17184baa909868361184e7"},
13 | "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"},
14 | "hpax": {:hex, :hpax, "1.0.1", "c857057f89e8bd71d97d9042e009df2a42705d6d690d54eca84c8b29af0787b0", [:mix], [], "hexpm", "4e2d5a4f76ae1e3048f35ae7adb1641c36265510a2d4638157fbcb53dda38445"},
15 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
16 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"},
17 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"},
18 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"},
19 | "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"},
20 | "mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"},
21 | "nimble_csv": {:hex, :nimble_csv, "1.2.0", "4e26385d260c61eba9d4412c71cea34421f296d5353f914afe3f2e71cce97722", [:mix], [], "hexpm", "d0628117fcc2148178b034044c55359b26966c6eaa8e2ce15777be3bbc91b12a"},
22 | "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
23 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"},
24 | "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
25 | "plug": {:hex, :plug, "1.16.1", "40c74619c12f82736d2214557dedec2e9762029b2438d6d175c5074c933edc9d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a13ff6b9006b03d7e33874945b2755253841b238c34071ed85b0e86057f8cddc"},
26 | "plug_cowboy": {:hex, :plug_cowboy, "2.7.2", "fdadb973799ae691bf9ecad99125b16625b1c6039999da5fe544d99218e662e4", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "245d8a11ee2306094840c000e8816f0cbed69a23fc0ac2bcf8d7835ae019bb2f"},
27 | "plug_crypto": {:hex, :plug_crypto, "2.1.0", "f44309c2b06d249c27c8d3f65cfe08158ade08418cf540fd4f72d4d6863abb7b", [:mix], [], "hexpm", "131216a4b030b8f8ce0f26038bc4421ae60e4bb95c5cf5395e1421437824c4fa"},
28 | "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"},
29 | "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
30 | "thousand_island": {:hex, :thousand_island, "1.3.7", "1da7598c0f4f5f50562c097a3f8af308ded48cd35139f0e6f17d9443e4d0c9c5", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "0139335079953de41d381a6134d8b618d53d084f558c734f2662d1a72818dd12"},
31 | "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
32 | }
33 |
--------------------------------------------------------------------------------
/test/my_netrc:
--------------------------------------------------------------------------------
1 | machine localhost
2 | login foo
3 | password bar
4 |
5 | machine httpbin.org
6 | login foo
7 | password bar
--------------------------------------------------------------------------------
/test/req/default_options_test.exs:
--------------------------------------------------------------------------------
1 | defmodule Req.DefaultOptionsTest do
2 | use ExUnit.Case
3 |
4 | setup do
5 | bypass = Bypass.open()
6 | [bypass: bypass, url: "http://localhost:#{bypass.port}"]
7 | end
8 |
9 | test "default options", c do
10 | pid = self()
11 |
12 | Bypass.expect(c.bypass, "GET", "/", fn conn ->
13 | send(pid, {:params, conn.params})
14 | Plug.Conn.send_resp(conn, 200, "ok")
15 | end)
16 |
17 | Req.default_options(params: %{"foo" => "bar"})
18 | Req.get!(c.url)
19 | assert_received {:params, %{"foo" => "bar"}}
20 | after
21 | Application.put_env(:req, :default_options, [])
22 | end
23 | end
24 |
--------------------------------------------------------------------------------
/test/req/fields_test.exs:
--------------------------------------------------------------------------------
1 | defmodule Req.FieldsTest do
2 | use ExUnit.Case, async: true
3 | doctest Req.Fields
4 | end
5 |
--------------------------------------------------------------------------------
/test/req/finch_test.exs:
--------------------------------------------------------------------------------
# Tests for the default Finch-based adapter: the low-level :finch_request
# hook, connection options (:connect_options, :inet6, :finch), response
# streaming via `into:` (fun / collectable / :self), and the pure
# Req.Finch.pool_options/1 translation.
defmodule Req.FinchTest do
  use ExUnit.Case, async: true
  import TestHelper, only: [start_http_server: 1, start_tcp_server: 1]

  describe "run" do
    test ":finch_request" do
      %{url: url} =
        start_http_server(fn conn ->
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      pid = self()

      # Custom hook performs the Finch call itself and substitutes the body,
      # proving Req uses exactly what the hook returns.
      fun = fn req, finch_request, finch_name, finch_opts ->
        {:ok, resp} = Finch.request(finch_request, finch_name, finch_opts)
        send(pid, resp)
        {req, Req.Response.new(status: resp.status, headers: resp.headers, body: "finch_request")}
      end

      assert Req.get!(url, finch_request: fun).body == "finch_request"
      assert_received %Finch.Response{body: "ok"}
    end

    test ":finch_request error" do
      # Returning an exception struct from the hook surfaces it to the caller.
      fun = fn req, _finch_request, _finch_name, _finch_opts ->
        {req, %ArgumentError{message: "exec error"}}
      end

      assert_raise ArgumentError, "exec error", fn ->
        Req.get!("http://localhost", finch_request: fun, retry: false)
      end
    end

    test ":finch_request with invalid return" do
      fun = fn _, _, _, _ -> :ok end

      assert_raise RuntimeError, ~r"expected adapter to return \{request, response\}", fn ->
        Req.get!("http://localhost", finch_request: fun)
      end
    end

    test "pool timeout" do
      %{url: url} =
        start_http_server(fn conn ->
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      # A zero pool timeout guarantees connection checkout always fails.
      options = [pool_timeout: 0]

      assert_raise RuntimeError, ~r/unable to provide a connection within the timeout/, fn ->
        Req.get!(url, options)
      end
    end

    test ":receive_timeout" do
      pid = self()

      # Raw TCP server that stalls for 1s before answering so the 50ms
      # receive timeout below reliably fires first.
      %{url: url} =
        start_tcp_server(fn socket ->
          assert {:ok, "GET / HTTP/1.1\r\n" <> _} = :gen_tcp.recv(socket, 0)
          send(pid, :ping)
          body = "ok"

          Process.sleep(1000)

          data = """
          HTTP/1.1 200 OK
          content-length: #{byte_size(body)}

          #{body}
          """

          :ok = :gen_tcp.send(socket, data)
        end)

      req = Req.new(url: url, receive_timeout: 50, retry: false)
      assert {:error, %Req.TransportError{reason: :timeout}} = Req.request(req)
      assert_received :ping
    end

    test "Req.HTTPError" do
      # Server replies with a malformed status line to trigger a protocol error.
      %{url: url} =
        start_tcp_server(fn socket ->
          assert {:ok, "GET / HTTP/1.1\r\n" <> _} = :gen_tcp.recv(socket, 0)
          :ok = :gen_tcp.send(socket, "bad\r\n")
        end)

      req = Req.new(url: url, retry: false)
      {:error, %Req.HTTPError{protocol: :http1, reason: :invalid_status_line}} = Req.request(req)
    end

    test ":connect_options :protocol" do
      %{url: url} =
        start_http_server(fn conn ->
          assert Plug.Conn.get_http_protocol(conn) == :"HTTP/2"
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      req = Req.new(url: url, connect_options: [protocols: [:http2]], retry: false)
      assert Req.request!(req).body == "ok"
    end

    test ":connect_options :proxy" do
      %{url: url} =
        start_http_server(fn conn ->
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      # Bandit will forward request to itself
      # Not quite a proper forward proxy server, but good enough
      proxy = {:http, "localhost", url.port, []}

      req = Req.new(base_url: url, connect_options: [proxy: proxy])
      assert Req.request!(req).body == "ok"
    end

    test ":connect_options :hostname" do
      %{url: url} =
        start_http_server(fn conn ->
          # The Host header should carry the overridden hostname, not localhost.
          assert ["example.com:" <> _] = Plug.Conn.get_req_header(conn, "host")
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      req = Req.new(base_url: url, connect_options: [hostname: "example.com"])
      assert Req.request!(req).body == "ok"
    end

    test ":connect_options :transport_opts" do
      %{url: url} =
        start_http_server(fn conn ->
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      # A nonexistent CA file proves transport_opts reach the TLS layer.
      req = Req.new(connect_options: [transport_opts: [cacertfile: "bad.pem"]])

      assert_raise File.Error, ~r/could not read file "bad.pem"/, fn ->
        Req.request!(req, url: %{url | scheme: "https"})
      end
    end

    # Minimal plug used by the ":inet6" test servers below.
    defmodule ExamplePlug do
      def init(options), do: options

      def call(conn, []) do
        Plug.Conn.send_resp(conn, 200, "ok")
      end
    end

    test ":inet6" do
      start_supervised!(
        {Plug.Cowboy, scheme: :http, plug: ExamplePlug, ref: ExamplePlug.IPv4, port: 0}
      )

      # ipv6_v6only ensures this listener is NOT reachable over IPv4.
      start_supervised!(
        {Plug.Cowboy,
         scheme: :http,
         plug: ExamplePlug,
         ref: ExamplePlug.IPv6,
         port: 0,
         net: :inet6,
         ipv6_v6only: true}
      )

      ipv4_port = :ranch.get_port(ExamplePlug.IPv4)
      ipv6_port = :ranch.get_port(ExamplePlug.IPv6)

      req = Req.new(url: "http://localhost:#{ipv4_port}")
      assert Req.request!(req).body == "ok"

      req = Req.new(url: "http://localhost:#{ipv4_port}", inet6: true)
      assert Req.request!(req).body == "ok"

      req = Req.new(url: "http://localhost:#{ipv6_port}", inet6: true)
      assert Req.request!(req).body == "ok"

      req = Req.new(url: "http://[::1]:#{ipv6_port}")
      assert Req.request!(req).body == "ok"
    end

    test ":connect_options bad option" do
      assert_raise ArgumentError, "unknown option :timeou. Did you mean :timeout?", fn ->
        Req.get!("http://localhost", connect_options: [timeou: 0])
      end
    end

    test ":finch option" do
      assert_raise ArgumentError, "unknown registry: MyFinch", fn ->
        Req.get!("http://localhost", finch: MyFinch)
      end
    end

    test ":finch and :connect_options" do
      assert_raise ArgumentError, "cannot set both :finch and :connect_options", fn ->
        Req.request!(finch: MyFinch, connect_options: [timeout: 0])
      end
    end

    # Telemetry handler (must be a public, captured function so :telemetry
    # doesn't warn about local closures); reads the pid planted via
    # :finch_private out of the request's private map.
    def send_telemetry_metadata_pid(_name, _measurements, metadata, _) do
      send(metadata.request.private.pid, :telemetry_private)
      :ok
    end

    test ":finch_private", %{test: test} do
      on_exit(fn -> :telemetry.detach("#{test}") end)

      :ok =
        :telemetry.attach(
          "#{test}",
          [:finch, :request, :stop],
          &__MODULE__.send_telemetry_metadata_pid/4,
          nil
        )

      %{url: url} =
        start_http_server(fn conn ->
          Plug.Conn.send_resp(conn, 200, "finch_private")
        end)

      assert Req.get!(url, finch_private: %{pid: self()}).body == "finch_private"
      assert_received :telemetry_private
    end

    test "into: fun" do
      # Hand-rolled chunked response with trailers; `\r` in the heredoc
      # produces the CR required by the chunked encoding.
      %{url: url} =
        start_tcp_server(fn socket ->
          {:ok, "GET / HTTP/1.1\r\n" <> _} = :gen_tcp.recv(socket, 0)

          data = """
          HTTP/1.1 200 OK
          transfer-encoding: chunked
          trailer: x-foo, x-bar

          6\r
          chunk1\r
          6\r
          chunk2\r
          0\r
          x-foo: foo\r
          x-bar: bar\r
          \r
          """

          :ok = :gen_tcp.send(socket, data)
        end)

      pid = self()

      resp =
        Req.get!(
          url: url,
          into: fn {:data, data}, acc ->
            send(pid, {:data, data})
            {:cont, acc}
          end
        )

      assert resp.status == 200
      assert resp.headers["transfer-encoding"] == ["chunked"]
      assert resp.headers["trailer"] == ["x-foo, x-bar"]

      assert resp.trailers["x-foo"] == ["foo"]
      assert resp.trailers["x-bar"] == ["bar"]

      assert_receive {:data, "chunk1"}
      assert_receive {:data, "chunk2"}
      refute_receive _
    end

    test "into: fun with halt" do
      # try fixing `** (exit) shutdown` on CI by starting custom server
      defmodule StreamPlug do
        def init(options), do: options

        def call(conn, []) do
          conn = Plug.Conn.send_chunked(conn, 200)
          {:ok, conn} = Plug.Conn.chunk(conn, "foo")
          {:ok, conn} = Plug.Conn.chunk(conn, "bar")
          conn
        end
      end

      start_supervised!({Plug.Cowboy, plug: StreamPlug, scheme: :http, port: 0})
      url = "http://localhost:#{:ranch.get_port(StreamPlug.HTTP)}"

      # Halting after the first chunk means "bar" is never accumulated.
      resp =
        Req.get!(
          url: url,
          into: fn {:data, data}, {req, resp} ->
            resp = update_in(resp.body, &(&1 <> data))
            {:halt, {req, resp}}
          end
        )

      assert resp.status == 200
      assert resp.body == "foo"
    end

    test "into: fun handle error" do
      assert {:error, %Req.TransportError{reason: :econnrefused}} =
               Req.get(
                 url: "http://localhost:9999",
                 retry: false,
                 into: fn {:data, data}, {req, resp} ->
                   resp = update_in(resp.body, &(&1 <> data))
                   {:halt, {req, resp}}
                 end
               )
    end

    test "into: collectable" do
      # Same chunked+trailers server as "into: fun" above.
      %{url: url} =
        start_tcp_server(fn socket ->
          {:ok, "GET / HTTP/1.1\r\n" <> _} = :gen_tcp.recv(socket, 0)

          data = """
          HTTP/1.1 200 OK
          transfer-encoding: chunked
          trailer: x-foo, x-bar

          6\r
          chunk1\r
          6\r
          chunk2\r
          0\r
          x-foo: foo\r
          x-bar: bar\r
          \r
          """

          :ok = :gen_tcp.send(socket, data)
        end)

      resp =
        Req.get!(
          url: url,
          into: []
        )

      assert resp.status == 200
      assert resp.headers["transfer-encoding"] == ["chunked"]
      assert resp.headers["trailer"] == ["x-foo, x-bar"]

      assert resp.trailers["x-foo"] == ["foo"]
      assert resp.trailers["x-bar"] == ["bar"]

      assert resp.body == ["chunk1", "chunk2"]
    end

    test "into: collectable non-200" do
      # Ignores the collectable and returns body as usual

      %{url: url} =
        start_http_server(fn conn ->
          Req.Test.json(%{conn | status: 404}, %{error: "not found"})
        end)

      resp =
        Req.get!(
          url: url,
          into: :not_a_collectable
        )

      assert resp.status == 404
      assert resp.body == %{"error" => "not found"}
    end

    test "into: collectable handle error" do
      assert {:error, %Req.TransportError{reason: :econnrefused}} =
               Req.get(
                 url: "http://localhost:9999",
                 retry: false,
                 into: IO.stream()
               )
    end

    # TODO
    @tag :skip
    test "into: fun with content-encoding" do
      %{url: url} =
        start_http_server(fn conn ->
          conn
          |> Plug.Conn.put_resp_header("content-encoding", "gzip")
          |> Plug.Conn.send_resp(200, :zlib.gzip("foo"))
        end)

      pid = self()

      fun = fn {:data, data}, acc ->
        send(pid, {:data, data})
        {:cont, acc}
      end

      assert Req.get!(url: url, into: fun).body == ""
      assert_received {:data, "foo"}
      refute_receive _
    end

    test "into: :self" do
      %{url: url} =
        start_http_server(fn conn ->
          conn = Plug.Conn.send_chunked(conn, 200)
          {:ok, conn} = Plug.Conn.chunk(conn, "foo")
          {:ok, conn} = Plug.Conn.chunk(conn, "bar")
          conn
        end)

      # assert_receive(_) returns the received raw message, which is then fed
      # to Req.parse_message/2.
      resp = Req.get!(url: url, into: :self)
      assert resp.status == 200
      assert {:ok, [data: "foo"]} = Req.parse_message(resp, assert_receive(_))
      assert {:ok, [data: "bar"]} = Req.parse_message(resp, assert_receive(_))
      assert {:ok, [:done]} = Req.parse_message(resp, assert_receive(_))
      assert :unknown = Req.parse_message(resp, :other)
      refute_receive _
    end

    test "into: :self cancel" do
      %{url: url} =
        start_http_server(fn conn ->
          conn = Plug.Conn.send_chunked(conn, 200)
          {:ok, conn} = Plug.Conn.chunk(conn, "foo")
          {:ok, conn} = Plug.Conn.chunk(conn, "bar")
          conn
        end)

      resp = Req.get!(url: url, into: :self)
      assert resp.status == 200
      assert :ok = Req.cancel_async_response(resp)
    end

    @tag :capture_log
    test "into: :self with redirect" do
      # The first `url` binding (the final target) is captured by the second
      # server's closure; `url` is then rebound to the redirecting server.
      %{url: url} =
        TestHelper.start_http_server(fn conn ->
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      %{url: url} =
        TestHelper.start_http_server(fn conn ->
          conn
          |> Plug.Conn.put_resp_header("location", to_string(url))
          |> Plug.Conn.send_resp(307, "redirecting to #{url}")
        end)

      req =
        Req.new(
          url: url,
          into: :self
        )

      assert Req.get!(req).body |> Enum.to_list() == ["ok"]
    end

    test "into: :self enumerable with unrelated message" do
      %{url: url} =
        start_http_server(fn conn ->
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      # A foreign message in the mailbox must not be consumed by the stream.
      send(self(), :other)
      resp = Req.get!(url: url, into: :self)
      assert Enum.to_list(resp.body) == ["ok"]
      assert_received :other
    end

    test "into: :self with :receive_timeout" do
      %{url: url} =
        start_http_server(fn conn ->
          Process.sleep(100)
          Plug.Conn.send_resp(conn, 200, "ok")
        end)

      assert Req.get(url: url, into: :self, receive_timeout: 0, retry: false) ==
               {:error, %Req.TransportError{reason: :timeout}}
    end
  end

  # Pure unit tests: Req options -> Finch pool options translation.
  describe "pool_options" do
    test "defaults" do
      assert Req.Finch.pool_options([]) ==
               [
                 protocols: [:http1]
               ]
    end

    test "ipv6" do
      assert Req.Finch.pool_options(inet6: true) ==
               [
                 protocols: [:http1],
                 conn_opts: [transport_opts: [inet6: true]]
               ]
    end

    test "connect_options protocols" do
      assert Req.Finch.pool_options(connect_options: [protocols: [:http2]]) ==
               [
                 protocols: [:http2]
               ]
    end

    test "connect_options timeout" do
      assert Req.Finch.pool_options(connect_options: [timeout: 0]) ==
               [
                 protocols: [:http1],
                 conn_opts: [transport_opts: [timeout: 0]]
               ]
    end

    test "connect_options transport_opts" do
      assert Req.Finch.pool_options(connect_options: [transport_opts: [cacerts: []]]) ==
               [
                 protocols: [:http1],
                 conn_opts: [transport_opts: [cacerts: []]]
               ]
    end

    test "connect_options transport_opts + timeout + ipv6" do
      assert Req.Finch.pool_options(
               connect_options: [timeout: 0, transport_opts: [cacerts: []]],
               inet6: true
             ) ==
               [
                 protocols: [:http1],
                 conn_opts: [transport_opts: [timeout: 0, inet6: true, cacerts: []]]
               ]
    end
  end
end
528 |
--------------------------------------------------------------------------------
/test/req/httpc_test.exs:
--------------------------------------------------------------------------------
1 | # Experimental httpc adapter to test the adapter contract.
2 |
defmodule Req.Httpc do
  # Adapter entry point: translates a `Req.Request` into an `:httpc` call and
  # returns `{request, response}` per the Req adapter contract.
  #
  # Dispatch on `request.into`:
  #   * `nil`  — synchronous request, full body in the response
  #   * `:self` — async request; caller consumes chunks via `Req.parse_message/2`
  #   * a fun  — async request; chunks are fed to the fun as `{:data, data}`
  def run(request) do
    # :httpc expects charlists for the URL and for header names/values.
    httpc_url = request.url |> URI.to_string() |> String.to_charlist()

    httpc_headers =
      for {name, values} <- request.headers,
          # TODO: remove List.wrap on Req 1.0
          value <- List.wrap(values) do
        {String.to_charlist(name), String.to_charlist(value)}
      end

    httpc_req =
      if request.method in [:post, :put] do
        # :httpc takes the content type as a separate argument, not a header.
        content_type =
          case Req.Request.get_header(request, "content-type") do
            [value] ->
              String.to_charlist(value)

            [] ->
              ~c"application/octet-stream"
          end

        body =
          case request.body do
            {:stream, enumerable} ->
              httpc_enumerable_to_fun(enumerable)

            iodata ->
              iodata
          end

        {httpc_url, httpc_headers, content_type, body}
      else
        {httpc_url, httpc_headers}
      end

    httpc_http_options = [
      ssl: [
        verify: :verify_peer,
        cacertfile: CAStore.file_path(),
        depth: 2,
        customize_hostname_check: [
          match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
        ]
      ]
    ]

    httpc_options = [
      body_format: :binary
    ]

    case request.into do
      nil ->
        httpc_request(request, httpc_req, httpc_http_options, httpc_options)

      :self ->
        httpc_async(request, httpc_req, httpc_http_options, httpc_options, :self)

      fun ->
        httpc_async(request, httpc_req, httpc_http_options, httpc_options, fun)
    end
  end

  # Synchronous request; converts charlist headers back to strings.
  # Only the `{:ok, result}` shape is handled — an `{:error, reason}` from
  # :httpc crashes with a CaseClauseError, acceptable for this experimental,
  # test-only adapter (a production adapter would map it to an exception).
  defp httpc_request(request, httpc_req, httpc_http_options, httpc_options) do
    case :httpc.request(request.method, httpc_req, httpc_http_options, httpc_options) do
      {:ok, {{_, status, _}, headers, body}} ->
        headers =
          for {name, value} <- headers do
            {List.to_string(name), List.to_string(value)}
          end

        {request, Req.Response.new(status: status, headers: headers, body: body)}
    end
  end

  # Wraps an enumerable into the `{:chunkify, next_fun, state}` body format
  # :httpc uses for streamed request bodies, by suspending the enumerable's
  # reducer after every element.
  defp httpc_enumerable_to_fun(enumerable) do
    reducer = fn item, _acc ->
      {:suspend, item}
    end

    {_, _, fun} = Enumerable.reduce(enumerable, {:suspend, nil}, reducer)

    {:chunkify, &httpc_next/1, fun}
  end

  # Pulls the next chunk out of a suspended enumerable continuation.
  # Returns `{:ok, chunk, next}` or `:eof`, matching :httpc's expectations.
  defp httpc_next(fun) do
    case fun.({:cont, nil}) do
      {:suspended, element, fun} ->
        {:ok, element, fun}

      {:done, nil} ->
        :eof

      {:halted, element} ->
        {:ok, element, fn _ -> {:done, nil} end}
    end
  end

  # Async request. `stream: :self` delivers chunks as mailbox messages (for
  # `into: :self`); `stream: {:self, :once}` requires explicit
  # `:httpc.stream_next/1` calls and is used to drive a caller-supplied fun.
  defp httpc_async(request, httpc_req, httpc_http_options, httpc_options, self_or_fun) do
    stream =
      case self_or_fun do
        :self ->
          :self

        fun when is_function(fun) ->
          {:self, :once}
      end

    httpc_options = [sync: false, stream: stream] ++ httpc_options
    {:ok, ref} = :httpc.request(request.method, httpc_req, httpc_http_options, httpc_options)

    receive do
      {:http, {^ref, :stream_start, headers}} ->
        headers =
          for {name, value} <- headers do
            {List.to_string(name), List.to_string(value)}
          end

        # :httpc does not report the status line for streamed responses;
        # infer 206 from a content-range header, otherwise assume 200.
        status =
          case List.keyfind(headers, "content-range", 0) do
            {_, _} -> 206
            _ -> 200
          end

        async = %Req.Response.Async{
          pid: self(),
          ref: ref,
          stream_fun: &httpc_stream/2,
          cancel_fun: &httpc_cancel/1
        }

        response = Req.Response.new(status: status, headers: headers)
        response = put_in(response.async, async)
        {request, response}

      # Fix: pin `ref` (previously unpinned, which rebound the variable and
      # would match a :stream_start message belonging to ANY httpc request).
      {:http, {^ref, :stream_start, headers, pid}} ->
        headers =
          for {name, value} <- headers do
            {List.to_string(name), List.to_string(value)}
          end

        status =
          case List.keyfind(headers, "content-range", 0) do
            {_, _} -> 206
            _ -> 200
          end

        response = Req.Response.new(status: status, headers: headers)

        case self_or_fun do
          :self ->
            {request, response}

          fun when is_function(fun) ->
            httpc_loop(request, response, ref, pid, fun)
        end

      # Some responses arrive whole despite the async options.
      {:http, {^ref, {{_, status, _}, headers, body}}} ->
        headers =
          for {name, value} <- headers do
            {List.to_string(name), List.to_string(value)}
          end

        response = Req.Response.new(status: status, headers: headers, body: body)
        {request, response}
    end
  end

  # Translates raw `{:http, ...}` mailbox messages into Req's chunk format.
  # The repeated `ref` in the head unifies: the message must carry the same
  # request id that was passed in.
  @doc false
  def httpc_stream(ref, {:http, {ref, :stream, data}}) do
    {:ok, [{:data, data}]}
  end

  # TODO: handle trailers
  def httpc_stream(ref, {:http, {ref, :stream_end, _headers}}) do
    {:ok, [:done]}
  end

  @doc false
  def httpc_cancel(ref) do
    :httpc.cancel_request(ref)
  end

  # Drives a `{:self, :once}` stream: request the next chunk, hand it to the
  # caller's fun, and either continue, or cancel on `{:halt, ...}`.
  defp httpc_loop(request, response, ref, pid, fun) do
    :ok = :httpc.stream_next(pid)

    receive do
      {:http, {^ref, :stream, data}} ->
        case fun.({:data, data}, {request, response}) do
          {:cont, {request, response}} ->
            httpc_loop(request, response, ref, pid, fun)

          {:halt, {request, response}} ->
            :ok = :httpc.cancel_request(ref)
            {request, response}
        end

      # TODO: handle trailers
      {:http, {^ref, :stream_end, _headers}} ->
        {request, response}
    end
  end
end
206 |
# Exercises the experimental Req.Httpc adapter above against a Bypass server,
# covering sync requests, streamed request bodies, and both `into:` streaming
# modes. Currently skipped module-wide (see @moduletag).
defmodule Req.HttpcTest do
  use ExUnit.Case, async: true

  require Logger

  # TODO
  @moduletag :skip

  setup do
    bypass = Bypass.open()

    req =
      Req.new(
        adapter: &Req.Httpc.run/1,
        url: "http://localhost:#{bypass.port}"
      )

    [bypass: bypass, req: req]
  end

  # Make sure :inets (which provides :httpc) is started when supported.
  if function_exported?(Mix, :ensure_application!, 1) do
    Mix.ensure_application!(:inets)
  end

  describe "httpc" do
    test "request", %{bypass: bypass, req: req} do
      Bypass.expect(bypass, "GET", "/", fn conn ->
        Plug.Conn.send_resp(conn, 200, "ok")
      end)

      resp = Req.get!(req)
      assert resp.status == 200
      assert Req.Response.get_header(resp, "server") == ["Cowboy"]
      assert resp.body == "ok"
    end

    test "post request body", %{bypass: bypass, req: req} do
      Bypass.expect(bypass, "POST", "/", fn conn ->
        assert {:ok, body, conn} = Plug.Conn.read_body(conn)
        Plug.Conn.send_resp(conn, 200, body)
      end)

      resp = Req.post!(req, body: "foofoofoo")
      assert resp.status == 200
      assert resp.body == "foofoofoo"
    end

    test "stream request body", %{bypass: bypass, req: req} do
      Bypass.expect(bypass, "POST", "/", fn conn ->
        assert {:ok, body, conn} = Plug.Conn.read_body(conn)
        Plug.Conn.send_resp(conn, 200, body)
      end)

      # Stream.take proves the enumerable is consumed lazily (only 2 of 3).
      resp = Req.post!(req, body: {:stream, Stream.take(["foo", "foo", "foo"], 2)})
      assert resp.status == 200
      assert resp.body == "foofoo"
    end

    test "into: fun", %{req: req, bypass: bypass} do
      Bypass.expect(bypass, "GET", "/", fn conn ->
        conn = Plug.Conn.send_chunked(conn, 200)
        {:ok, conn} = Plug.Conn.chunk(conn, "foo")
        {:ok, conn} = Plug.Conn.chunk(conn, "bar")
        conn
      end)

      pid = self()

      resp =
        Req.get!(
          req,
          into: fn {:data, data}, acc ->
            send(pid, {:data, data})
            {:cont, acc}
          end
        )

      assert resp.status == 200
      assert resp.headers["transfer-encoding"] == ["chunked"]
      assert_receive {:data, "foobar"}

      # httpc seems to randomly chunk things
      receive do
        {:data, ""} -> :ok
      after
        0 -> :ok
      end

      refute_receive _
    end

    test "into: :self", %{req: req, bypass: bypass} do
      Bypass.expect(bypass, "GET", "/", fn conn ->
        conn = Plug.Conn.send_chunked(conn, 200)
        {:ok, conn} = Plug.Conn.chunk(conn, "foo")
        {:ok, conn} = Plug.Conn.chunk(conn, "bar")
        conn
      end)

      resp = Req.get!(req, into: :self)
      assert resp.status == 200

      # httpc seems to randomly chunk things
      assert Req.parse_message(resp, assert_receive(_)) in [
               {:ok, [data: "foo"]},
               {:ok, [data: "foobar"]}
             ]

      assert Req.parse_message(resp, assert_receive(_)) in [
               {:ok, [data: "bar"]},
               {:ok, [data: ""]},
               {:ok, [:done]}
             ]
    end

    test "into: pid cancel", %{req: req, bypass: bypass} do
      Bypass.expect(bypass, "GET", "/", fn conn ->
        conn = Plug.Conn.send_chunked(conn, 200)
        {:ok, conn} = Plug.Conn.chunk(conn, "foo")
        {:ok, conn} = Plug.Conn.chunk(conn, "bar")
        conn
      end)

      resp = Req.get!(req, into: :self)
      assert resp.status == 200
      assert :ok = Req.cancel_async_response(resp)
    end
  end
end
336 |
--------------------------------------------------------------------------------
/test/req/integration_test.exs:
--------------------------------------------------------------------------------
# Doctests that hit real external services (httpbin, S3, ...), gated behind
# the :integration tag so the normal suite stays offline.
defmodule Req.IntegrationTest do
  use ExUnit.Case, async: true

  @moduletag :integration

  setup context do
    if context[:doctest] do
      # Doctest examples print to stdout; redirect this process's group
      # leader to a StringIO so that output doesn't pollute the test run,
      # restoring the original group leader on exit.
      original_gl = Process.group_leader()
      {:ok, capture_gl} = StringIO.open("")
      Process.group_leader(self(), capture_gl)

      on_exit(fn ->
        Process.group_leader(self(), original_gl)
      end)
    else
      :ok
    end
  end

  doctest Req,
    only: [
      get!: 2,
      head!: 2,
      post!: 2,
      put!: 2,
      patch!: 2,
      delete!: 2,
      run: 2,
      run!: 2
    ]

  doctest Req.Steps,
    only: [
      auth: 1,
      checksum: 1,
      put_user_agent: 1,
      compressed: 1,
      put_base_url: 1,
      encode_body: 1,
      put_params: 1,
      put_path_params: 1,
      put_range: 1,
      cache: 1,
      decompress_body: 1,
      handle_http_errors: 1
    ]

  @tag :s3
  test "s3" do
    aws_access_key_id = System.fetch_env!("REQ_AWS_ACCESS_KEY_ID")
    aws_secret_access_key = System.fetch_env!("REQ_AWS_SECRET_ACCESS_KEY")
    aws_bucket = System.fetch_env!("REQ_AWS_BUCKET")

    req =
      Req.new(
        base_url: "https://#{aws_bucket}.s3.amazonaws.com",
        aws_sigv4: [
          access_key_id: aws_access_key_id,
          secret_access_key: aws_secret_access_key
        ]
      )

    now = to_string(DateTime.utc_now())

    %{status: 200} =
      Req.put!(req,
        url: "/key1",
        body: now
      )

    assert Req.get!(req, url: "/key1").body == now

    now = to_string(DateTime.utc_now())

    # Streamed upload: content-length must be supplied explicitly because
    # the enumerable's size is not known to the adapter.
    %{status: 200} =
      Req.put!(req,
        url: "/key1",
        headers: [content_length: byte_size(now) * 2],
        body: Stream.take(Stream.cycle([now]), 2)
      )

    assert Req.get!(req, url: "/key1").body == String.duplicate(now, 2)
  end
end
85 |
--------------------------------------------------------------------------------
/test/req/request_test.exs:
--------------------------------------------------------------------------------
1 | defmodule Req.RequestTest do
2 | use ExUnit.Case, async: true
3 | doctest Req.Request, except: [delete_header: 2]
4 |
5 | setup do
6 | bypass = Bypass.open()
7 | [bypass: bypass, url: "http://localhost:#{bypass.port}"]
8 | end
9 |
10 | test "low-level API", c do
11 | Bypass.expect(c.bypass, "GET", "/ok", fn conn ->
12 | Plug.Conn.send_resp(conn, 200, "ok")
13 | end)
14 |
15 | request = new(url: c.url <> "/ok")
16 | assert {:ok, %{status: 200, body: "ok"}} = Req.Request.run(request)
17 | end
18 |
19 | test "merge_options/2: deprecated options" do
20 | output =
21 | ExUnit.CaptureIO.capture_io(:stderr, fn ->
22 | Req.Request.merge_options(Req.new(), url: "foo", headers: "bar")
23 | end)
24 |
25 | assert output =~ "Passing :url/:headers is deprecated"
26 | end
27 |
28 | test "simple request step", c do
29 | Bypass.expect(c.bypass, "GET", "/ok", fn conn ->
30 | Plug.Conn.send_resp(conn, 200, "ok")
31 | end)
32 |
33 | request =
34 | new(url: c.url <> "/not-found")
35 | |> Req.Request.prepend_request_steps(
36 | foo: fn request ->
37 | put_in(request.url.path, "/ok")
38 | end
39 | )
40 |
41 | assert {:ok, %{status: 200, body: "ok"}} = Req.Request.run(request)
42 | end
43 |
44 | test "step as MFArgs", c do
45 | Bypass.expect(c.bypass, "GET", "/", fn conn ->
46 | Plug.Conn.send_resp(conn, 200, "ok")
47 | end)
48 |
49 | request =
50 | new(url: c.url)
51 | |> Req.Request.prepend_request_steps(foo: {__MODULE__, :simple_step, [:hi]})
52 |
53 | assert {:ok, %{status: 200, body: "ok"}} = Req.Request.run(request)
54 | assert_received :hi
55 | end
56 |
57 | def simple_step(request, what) do
58 | send(self(), what)
59 | request
60 | end
61 |
62 | test "request step returns response", c do
63 | request =
64 | new(url: c.url <> "/ok")
65 | |> Req.Request.prepend_request_steps(
66 | foo: fn request ->
67 | {request, %Req.Response{status: 200, body: "from cache"}}
68 | end
69 | )
70 | |> Req.Request.prepend_response_steps(
71 | foo: fn {request, response} ->
72 | {request, update_in(response.body, &(&1 <> " - updated"))}
73 | end
74 | )
75 |
76 | assert {:ok, %{status: 200, body: "from cache - updated"}} = Req.Request.run(request)
77 | end
78 |
79 | test "request step returns exception", c do
80 | request =
81 | new(url: c.url <> "/ok")
82 | |> Req.Request.prepend_request_steps(
83 | foo: fn request ->
84 | {request, RuntimeError.exception("oops")}
85 | end
86 | )
87 | |> Req.Request.prepend_error_steps(
88 | foo: fn {request, exception} ->
89 | {request, update_in(exception.message, &(&1 <> " - updated"))}
90 | end
91 | )
92 |
93 | assert {:error, %RuntimeError{message: "oops - updated"}} = Req.Request.run(request)
94 | end
95 |
96 | test "request step halts with response", c do
97 | request =
98 | new(url: c.url <> "/ok")
99 | |> Req.Request.prepend_request_steps(
100 | foo: fn request ->
101 | Req.Request.halt(request, %Req.Response{status: 200, body: "from cache"})
102 | end,
103 | bar: &unreachable/1
104 | )
105 | |> Req.Request.prepend_response_steps(foo: &unreachable/1)
106 | |> Req.Request.prepend_error_steps(foo: &unreachable/1)
107 |
108 | assert {:ok, %{status: 200, body: "from cache"}} = Req.Request.run(request)
109 | end
110 |
111 | test "request step halts with exception", c do
112 | request =
113 | new(url: c.url <> "/ok")
114 | |> Req.Request.prepend_request_steps(
115 | foo: fn request ->
116 | Req.Request.halt(request, RuntimeError.exception("oops"))
117 | end,
118 | bar: &unreachable/1
119 | )
120 | |> Req.Request.prepend_response_steps(foo: &unreachable/1)
121 | |> Req.Request.prepend_error_steps(foo: &unreachable/1)
122 |
123 | assert {:error, %RuntimeError{message: "oops"}} = Req.Request.run(request)
124 | end
125 |
126 | test "simple response step", c do
127 | Bypass.expect(c.bypass, "GET", "/ok", fn conn ->
128 | Plug.Conn.send_resp(conn, 200, "ok")
129 | end)
130 |
131 | request =
132 | new(url: c.url <> "/ok")
133 | |> Req.Request.prepend_response_steps(
134 | foo: fn {request, response} ->
135 | {request, update_in(response.body, &(&1 <> " - updated"))}
136 | end
137 | )
138 |
139 | assert {:ok, %{status: 200, body: "ok - updated"}} = Req.Request.run(request)
140 | end
141 |
142 | test "response step returns exception", c do
143 | Bypass.expect(c.bypass, "GET", "/ok", fn conn ->
144 | Plug.Conn.send_resp(conn, 200, "ok")
145 | end)
146 |
147 | request =
148 | new(url: c.url <> "/ok")
149 | |> Req.Request.prepend_response_steps(
150 | foo: fn {request, response} ->
151 | assert response.body == "ok"
152 | {request, RuntimeError.exception("oops")}
153 | end
154 | )
155 | |> Req.Request.prepend_error_steps(
156 | foo: fn {request, exception} ->
157 | {request, update_in(exception.message, &(&1 <> " - updated"))}
158 | end
159 | )
160 |
161 | assert {:error, %RuntimeError{message: "oops - updated"}} = Req.Request.run(request)
162 | end
163 |
164 | test "response step halts with response", c do
165 | Bypass.expect(c.bypass, "GET", "/ok", fn conn ->
166 | Plug.Conn.send_resp(conn, 200, "ok")
167 | end)
168 |
169 | request =
170 | new(url: c.url <> "/ok")
171 | |> Req.Request.prepend_response_steps(
172 | foo: fn {request, response} ->
173 | Req.Request.halt(request, update_in(response.body, &(&1 <> " - updated")))
174 | end,
175 | bar: &unreachable/1
176 | )
177 | |> Req.Request.prepend_error_steps(foo: &unreachable/1)
178 |
179 | assert {:ok, %{status: 200, body: "ok - updated"}} = Req.Request.run(request)
180 | end
181 |
182 | test "response step halts with exception", c do
183 | Bypass.expect(c.bypass, "GET", "/ok", fn conn ->
184 | Plug.Conn.send_resp(conn, 200, "ok")
185 | end)
186 |
187 | request =
188 | new(url: c.url <> "/ok")
189 | |> Req.Request.prepend_response_steps(
190 | foo: fn {request, response} ->
191 | assert response.body == "ok"
192 | Req.Request.halt(request, RuntimeError.exception("oops"))
193 | end,
194 | bar: &unreachable/1
195 | )
196 | |> Req.Request.prepend_error_steps(foo: &unreachable/1)
197 |
198 | assert {:error, %RuntimeError{message: "oops"}} = Req.Request.run(request)
199 | end
200 |
201 | test "simple error step", c do
202 | Bypass.down(c.bypass)
203 |
204 | request =
205 | new(url: c.url <> "/ok")
206 | |> Req.Request.prepend_error_steps(
207 | foo: fn {request, exception} ->
208 | assert exception.reason == :econnrefused
209 | {request, RuntimeError.exception("oops")}
210 | end
211 | )
212 |
213 | assert {:error, %RuntimeError{message: "oops"}} = Req.Request.run(request)
214 | end
215 |
216 | test "error step returns response", c do
217 | Bypass.down(c.bypass)
218 |
219 | request =
220 | new(url: c.url <> "/ok")
221 | |> Req.Request.prepend_response_steps(
222 | foo: fn {request, response} ->
223 | {request, update_in(response.body, &(&1 <> " - updated"))}
224 | end
225 | )
226 | |> Req.Request.prepend_error_steps(
227 | foo: fn {request, exception} ->
228 | assert exception.reason == :econnrefused
229 | {request, %Req.Response{status: 200, body: "ok"}}
230 | end,
231 | bar: &unreachable/1
232 | )
233 |
234 | assert {:ok, %{status: 200, body: "ok - updated"}} = Req.Request.run(request)
235 | end
236 |
237 | test "error step halts with response", c do
238 | Bypass.down(c.bypass)
239 |
240 | request =
241 | new(url: c.url <> "/ok")
242 | |> Req.Request.prepend_response_steps(foo: &unreachable/1)
243 | |> Req.Request.prepend_error_steps(
244 | foo: fn {request, exception} ->
245 | assert exception.reason == :econnrefused
246 | Req.Request.halt(request, %Req.Response{status: 200, body: "ok"})
247 | end,
248 | bar: &unreachable/1
249 | )
250 |
251 | assert {:ok, %{status: 200, body: "ok"}} = Req.Request.run(request)
252 | end
253 |
254 | test "prepare/1" do
255 | request =
256 | Req.new(method: :get, base_url: "http://foo", url: "/bar", auth: {:basic, "foo:bar"})
257 | |> Req.Request.prepare()
258 |
259 | assert request.url == URI.parse("http://foo/bar")
260 |
261 | authorization = "Basic " <> Base.encode64("foo:bar")
262 |
263 | if Req.MixProject.legacy_headers_as_lists?() do
264 | assert [
265 | {"user-agent", "req/0.3.11"},
266 | {"accept-encoding", "zstd, br, gzip"},
267 | {"authorization", ^authorization}
268 | ] = request.headers
269 | else
270 | assert %{
271 | "user-agent" => ["req/" <> _],
272 | "accept-encoding" => ["zstd, br, gzip"],
273 | "authorization" => [^authorization]
274 | } = request.headers
275 | end
276 | end
277 |
278 | ## Helpers
279 |
280 | defp new(options) do
281 | options = Keyword.update(options, :url, nil, &URI.parse/1)
282 | struct!(Req.Request, options)
283 | end
284 |
285 | defp unreachable(_) do
286 | raise "unreachable"
287 | end
288 | end
289 |
--------------------------------------------------------------------------------
/test/req/response_test.exs:
--------------------------------------------------------------------------------
defmodule Req.ResponseTest do
  use ExUnit.Case, async: true

  # NOTE(review): the get_header/2 and delete_header/2 doctest examples are
  # excluded from the doctest run — presumably their examples do not hold
  # verbatim under the configured headers mode; confirm against the @doc
  # examples in Req.Response.
  doctest Req.Response, except: [get_header: 2, delete_header: 2]
end
5 |
--------------------------------------------------------------------------------
/test/req/test_test.exs:
--------------------------------------------------------------------------------
defmodule Req.TestTest do
  use ExUnit.Case, async: true
  doctest Req.Test, except: [expect: 3]

  # Exercises the internal plug registry: per-process (private) stubs,
  # shared mode, and how stubs propagate to spawned tasks.
  test "__fetch_plug__" do
    # No stub registered yet: fetching must raise.
    assert_raise RuntimeError, ~r/cannot find mock/, fn ->
      Req.Test.__fetch_plug__(:foo)
    end

    Req.Test.stub(:foo, {MyPlug, [1]})
    assert Req.Test.__fetch_plug__(:foo) == {MyPlug, [1]}

    # Re-stubbing the same key replaces the previous value.
    Req.Test.stub(:foo, {MyPlug, [2]})
    assert Req.Test.__fetch_plug__(:foo) == {MyPlug, [2]}

    # A task spawned from this process sees the caller's stub; a stub set
    # inside the task does not leak back to the caller (asserted below).
    Task.async(fn ->
      assert Req.Test.__fetch_plug__(:foo) == {MyPlug, [2]}
      Req.Test.stub(:foo, {MyPlug, [3]})
    end)
    |> Task.await()

    assert Req.Test.__fetch_plug__(:foo) == {MyPlug, [2]}

    # In shared mode, stubs and expectations are visible from any process.
    Req.Test.set_req_test_to_shared()
    Req.Test.stub(:bar, {SharedPlug, [1]})

    Task.async(fn ->
      assert Req.Test.__fetch_plug__(:bar) == {SharedPlug, [1]}
    end)
    |> Task.await()

    Req.Test.expect(:baz, {SharedPlug, [1]})

    Task.async(fn ->
      assert Req.Test.__fetch_plug__(:baz) == {SharedPlug, [1]}
    end)
    |> Task.await()
  after
    # Restore private mode so this test does not affect the other tests.
    Req.Test.set_req_test_to_private()
  end

  describe "expect/3" do
    test "works in the normal expectation-based way" do
      # An expectation with count 2 is consumed once per fetch; the third
      # fetch fails.
      Req.Test.expect(:foo, 2, 1)
      assert Req.Test.__fetch_plug__(:foo) == 1
      assert Req.Test.__fetch_plug__(:foo) == 1

      assert_raise RuntimeError, "no mock or stub for :foo", fn ->
        Req.Test.__fetch_plug__(:foo)
      end
    end

    test "works with the default expected count of 1" do
      Req.Test.expect(:foo_default, 1)
      assert Req.Test.__fetch_plug__(:foo_default) == 1

      assert_raise RuntimeError, "no mock or stub for :foo_default", fn ->
        assert Req.Test.__fetch_plug__(:foo_default)
      end
    end

    test "works in order" do
      # Expectations queue up and are consumed FIFO, honoring each count.
      Req.Test.expect(:foo, :a)
      Req.Test.expect(:foo, 2, :b)
      Req.Test.expect(:foo, :c)
      assert Req.Test.__fetch_plug__(:foo) == :a
      assert Req.Test.__fetch_plug__(:foo) == :b
      assert Req.Test.__fetch_plug__(:foo) == :b
      assert Req.Test.__fetch_plug__(:foo) == :c
    end
  end

  describe "plug" do
    test "function" do
      # stub/2 accepts both 1-arity and 2-arity plug functions.
      Req.Test.stub(:foo, &Plug.Conn.send_resp(&1, 200, "1"))
      assert Req.get!(plug: {Req.Test, :foo}).body == "1"

      Req.Test.stub(:foo, fn conn, _ ->
        Plug.Conn.send_resp(conn, 200, "2")
      end)

      assert Req.get!(plug: {Req.Test, :foo}).body == "2"

      # A task inherits the caller's stub; re-stubbing inside the task is
      # local to it — the caller still sees "2" afterwards.
      Task.async(fn ->
        assert Req.get!(plug: {Req.Test, :foo}).body == "2"

        Req.Test.stub(:foo, &Plug.Conn.send_resp(&1, 200, "3"))
        assert Req.get!(plug: {Req.Test, :foo}).body == "3"
      end)
      |> Task.await()

      assert Req.get!(plug: {Req.Test, :foo}).body == "2"

      assert_raise RuntimeError, ~r/cannot find mock/, fn ->
        Req.get(plug: {Req.Test, :bad})
      end
    end

    test "module" do
      # A module plug: init/1 transforms the options, call/2 receives them.
      defmodule Foo do
        def init([]), do: "default"
        def init(other), do: other
        def call(conn, string), do: Plug.Conn.send_resp(conn, 200, string)
      end

      Req.Test.stub(:foo, Foo)
      assert Req.get!(plug: {Req.Test, :foo}).body == "default"

      Req.Test.stub(:foo, {Foo, "hi"})
      assert Req.get!(plug: {Req.Test, :foo}).body == "hi"
    end
  end

  describe "allow/3" do
    test "allows the request via an owner process" do
      test_pid = self()
      ref = make_ref()

      Req.Test.stub(:foo, Plug.Logger)

      child_pid =
        spawn(fn ->
          # Make sure we have no $callers in the pdict.
          Process.delete(:"$callers")

          receive do
            :go -> send(test_pid, {ref, Req.Test.__fetch_plug__(:foo)})
          end
        end)

      # With no $callers link, the child only sees the stub because it was
      # explicitly allowed by the owner (this test process).
      Req.Test.stub(:foo, Plug.Logger)
      Req.Test.allow(:foo, self(), child_pid)

      send(child_pid, :go)
      assert_receive {^ref, Plug.Logger}
    end
  end

  describe "transport_error/2" do
    test "validate reason" do
      # Only known transport reasons are accepted.
      assert_raise ArgumentError, "unexpected Req.TransportError reason: :bad", fn ->
        Req.Test.transport_error(%Plug.Conn{}, :bad)
      end
    end
  end

  describe "verify!/0" do
    test "verifies all mocks for the current process in private mode" do
      Req.Test.set_req_test_to_private()
      # With nothing pending, verify!/0 passes.
      Req.Test.verify!()

      Req.Test.expect(:foo, 2, &Req.Test.json(&1, %{}))
      Req.Test.expect(:bar, 1, &Req.Test.json(&1, %{}))

      # Unmet expectations are reported per key, with remaining counts.
      error = assert_raise(RuntimeError, &Req.Test.verify!/0)
      assert error.message =~ "error while verifying Req.Test expectations for"
      assert error.message =~ "* expected :foo to be still used 2 more times"
      assert error.message =~ "* expected :bar to be still used 1 more times"

      Req.request!(plug: {Req.Test, :foo})

      # Each consumed expectation decrements the reported remaining count.
      error = assert_raise(RuntimeError, &Req.Test.verify!/0)
      assert error.message =~ "error while verifying Req.Test expectations for"
      assert error.message =~ "* expected :foo to be still used 1 more times"
      assert error.message =~ "* expected :bar to be still used 1 more times"

      Req.request!(plug: {Req.Test, :foo})
      Req.request!(plug: {Req.Test, :bar})
      Req.Test.verify!()
    end
  end

  describe "verify!/1" do
    test "verifies all mocks for the current process in private mode" do
      Req.Test.set_req_test_to_private()
      Req.Test.verify!(:foo)

      Req.Test.expect(:foo, 2, &Req.Test.json(&1, %{}))

      # Verifying a different key is fine.
      Req.Test.verify!(:bar)

      error = assert_raise(RuntimeError, fn -> Req.Test.verify!(:foo) end)
      assert error.message =~ "error while verifying Req.Test expectations for"
      assert error.message =~ "* expected :foo to be still used 2 more times"

      Req.request!(plug: {Req.Test, :foo})

      error = assert_raise(RuntimeError, fn -> Req.Test.verify!(:foo) end)
      assert error.message =~ "error while verifying Req.Test expectations for"
      assert error.message =~ "* expected :foo to be still used 1 more times"

      Req.request!(plug: {Req.Test, :foo})
      Req.Test.verify!(:foo)
    end
  end
end
198 |
--------------------------------------------------------------------------------
/test/req/utils_test.exs:
--------------------------------------------------------------------------------
defmodule Req.UtilsTest do
  use ExUnit.Case, async: true

  # TODO: Remove when we require Elixir 1.14
  if Version.match?(System.version(), "~> 1.14") do
    doctest Req.Utils
  end

  # AWS SigV4 tests pin datetime and credentials, so signatures are
  # deterministic; header results are cross-checked against the reference
  # :aws_signature Erlang library (which returns capitalized header names,
  # hence the String.downcase in the comparisons).
  describe "aws_sigv4_headers" do
    test "GET" do
      options = [
        access_key_id: "dummy-access-key-id",
        secret_access_key: "dummy-secret-access-key",
        region: "dummy-region",
        service: "s3",
        datetime: ~U[2024-01-01 09:00:00Z],
        method: :get,
        # ":" in the path checks percent-encoding in the canonical request.
        url: "https://s3/foo/:bar",
        headers: [{"host", "s3"}],
        body: ""
      ]

      signature1 = Req.Utils.aws_sigv4_headers(options)

      signature2 =
        :aws_signature.sign_v4(
          Keyword.fetch!(options, :access_key_id),
          Keyword.fetch!(options, :secret_access_key),
          Keyword.fetch!(options, :region),
          Keyword.fetch!(options, :service),
          Keyword.fetch!(options, :datetime) |> NaiveDateTime.to_erl(),
          Keyword.fetch!(options, :method) |> Atom.to_string() |> String.upcase(),
          Keyword.fetch!(options, :url),
          Keyword.fetch!(options, :headers),
          Keyword.fetch!(options, :body),
          Keyword.take(options, [:body_digest])
        )

      assert signature1 ==
               Enum.map(signature2, fn {name, value} -> {String.downcase(name), value} end)
    end

    test "custom port" do
      options = [
        access_key_id: "dummy-access-key-id",
        secret_access_key: "dummy-secret-access-key",
        region: "dummy-region",
        service: "s3",
        datetime: ~U[2024-01-01 09:00:00Z],
        method: :get,
        url: "https://s3-compatible.com:4433/foo/:bar",
        headers: [],
        body: ""
      ]

      signature1 = Req.Utils.aws_sigv4_headers(options)

      # Passing a host header without the port must produce the same
      # signature as passing no host header at all (the port is taken from
      # the URL either way).
      signature2 =
        Req.Utils.aws_sigv4_headers(
          Keyword.put(options, :headers, [{"host", "s3-compatible.com"}])
        )

      # The reference implementation is given host WITH the port, which is
      # what the canonical request must contain.
      signature3 =
        :aws_signature.sign_v4(
          Keyword.fetch!(options, :access_key_id),
          Keyword.fetch!(options, :secret_access_key),
          Keyword.fetch!(options, :region),
          Keyword.fetch!(options, :service),
          Keyword.fetch!(options, :datetime) |> NaiveDateTime.to_erl(),
          Keyword.fetch!(options, :method) |> Atom.to_string() |> String.upcase(),
          Keyword.fetch!(options, :url),
          [{"host", "s3-compatible.com:4433"}],
          Keyword.fetch!(options, :body),
          Keyword.take(options, [:body_digest])
        )

      assert signature1 === signature2

      assert signature1 ==
               Enum.map(signature3, fn {name, value} -> {String.downcase(name), value} end)

      assert signature2 ==
               Enum.map(signature3, fn {name, value} -> {String.downcase(name), value} end)
    end
  end

  # Presigned-URL tests compare against pinned expected URLs (signatures are
  # stable because the datetime and credentials are fixed).
  describe "aws_sigv4_url" do
    test "GET" do
      options = [
        access_key_id: "dummy-access-key-id",
        secret_access_key: "dummy-secret-access-key",
        region: "dummy-region",
        service: "s3",
        datetime: ~U[2024-01-01 09:00:00Z],
        method: :get,
        url: "https://s3/foo/:bar"
      ]

      url1 = to_string(Req.Utils.aws_sigv4_url(options))

      url2 =
        """
        https://s3/foo/%3Abar?\
        X-Amz-Algorithm=AWS4-HMAC-SHA256\
        &X-Amz-Credential=dummy-access-key-id%2F20240101%2Fdummy-region%2Fs3%2Faws4_request\
        &X-Amz-Date=20240101T090000Z\
        &X-Amz-Expires=86400\
        &X-Amz-SignedHeaders=host\
        &X-Amz-Signature=7fd16f0749b0902acde5a3d8933315006f2993b279b995cad880165ff4be75ff\
        """

      assert url1 == url2
    end

    test "custom port" do
      options = [
        access_key_id: "dummy-access-key-id",
        secret_access_key: "dummy-secret-access-key",
        region: "dummy-region",
        service: "s3",
        datetime: ~U[2024-01-01 09:00:00Z],
        method: :get,
        url: "https://s3-compatible.com:4433/foo/:bar"
      ]

      url1 = to_string(Req.Utils.aws_sigv4_url(options))

      # The non-default port must survive into the presigned URL.
      url2 =
        """
        https://s3-compatible.com:4433/foo/%3Abar?\
        X-Amz-Algorithm=AWS4-HMAC-SHA256\
        &X-Amz-Credential=dummy-access-key-id%2F20240101%2Fdummy-region%2Fs3%2Faws4_request\
        &X-Amz-Date=20240101T090000Z\
        &X-Amz-Expires=86400\
        &X-Amz-SignedHeaders=host\
        &X-Amz-Signature=860c79d524ea488a96b56d9e687348f108262738a5205f907cc0794f73d23403\
        """

      assert url1 == url2
    end

    test "custom headers" do
      options = [
        access_key_id: "dummy-access-key-id",
        secret_access_key: "dummy-secret-access-key",
        region: "dummy-region",
        service: "s3",
        datetime: ~U[2024-01-01 09:00:00Z],
        method: :put,
        url: "https://s3/foo/hello_world.txt",
        headers: [{"content-length", 11}]
      ]

      url1 = to_string(Req.Utils.aws_sigv4_url(options))

      # Extra headers end up in X-Amz-SignedHeaders (sorted, ";"-joined —
      # %3B below).
      url2 =
        """
        https://s3/foo/hello_world.txt?\
        X-Amz-Algorithm=AWS4-HMAC-SHA256\
        &X-Amz-Credential=dummy-access-key-id%2F20240101%2Fdummy-region%2Fs3%2Faws4_request\
        &X-Amz-Date=20240101T090000Z\
        &X-Amz-Expires=86400\
        &X-Amz-SignedHeaders=content-length%3Bhost\
        &X-Amz-Signature=dbb4ae08836db5089a924f2eb52eb52dbc1c372a384a6a99ceb469b14b83e995\
        """

      assert url1 == url2
    end

    test "custom query" do
      options = [
        access_key_id: "dummy-access-key-id",
        secret_access_key: "dummy-secret-access-key",
        region: "dummy-region",
        service: "s3",
        datetime: ~U[2024-01-01 09:00:00Z],
        method: :get,
        url: "https://s3/foo/hello_world.txt",
        query: [{"response-content-disposition", ~s(attachment; filename="hello_world.txt")}]
      ]

      url1 = to_string(Req.Utils.aws_sigv4_url(options))

      # Custom query parameters are percent-encoded and appear before the
      # final X-Amz-Signature parameter.
      url2 =
        """
        https://s3/foo/hello_world.txt?\
        X-Amz-Algorithm=AWS4-HMAC-SHA256\
        &X-Amz-Credential=dummy-access-key-id%2F20240101%2Fdummy-region%2Fs3%2Faws4_request\
        &X-Amz-Date=20240101T090000Z\
        &X-Amz-Expires=86400\
        &X-Amz-SignedHeaders=host\
        &response-content-disposition=attachment%3B%20filename%3D%22hello_world.txt%22\
        &X-Amz-Signature=574a638441ff0e623c800b7379408748d58f3e6679e3ca2619c5900fa030beed\
        """

      assert url1 == url2
    end
  end

  describe "encode_form_multipart" do
    test "it works" do
      %{content_type: content_type, body: body, size: size} =
        Req.Utils.encode_form_multipart(
          [
            field1: 1,
            field2: {"22", filename: "2.txt"},
            field3: {["3", ?3, ?3], filename: "3.txt", content_type: "text/plain"}
          ],
          boundary: "foo"
        )

      body = IO.iodata_to_binary(body)
      assert size == byte_size(body)
      assert content_type == "multipart/form-data; boundary=foo"

      assert body == """
             --foo\r\n\
             content-disposition: form-data; name=\"field1\"\r\n\
             \r\n\
             1\r\n\
             --foo\r\n\
             content-disposition: form-data; name=\"field2\"; filename=\"2.txt\"\r\n\
             \r\n\
             22\r\n\
             --foo\r\n\
             content-disposition: form-data; name=\"field3\"; filename=\"3.txt\"\r\n\
             content-type: text/plain\r\n\
             \r\n\
             333\r\n\
             --foo--\r\n\
             """
    end

    test "it works with size" do
      %{content_type: content_type, body: body, size: size} =
        Req.Utils.encode_form_multipart([field1: {"value", size: 5}], boundary: "foo")

      body = IO.iodata_to_binary(body)

      assert size == byte_size(body)
      assert content_type == "multipart/form-data; boundary=foo"

      assert body == """
             --foo\r\n\
             content-disposition: form-data; name=\"field1\"\r\n\
             \r\n\
             value\r\n\
             --foo--\r\n\
             """
    end

    test "can accept any enumerable" do
      enum = Stream.cycle(["a"]) |> Stream.take(10)

      %{body: body, size: size} =
        Req.Utils.encode_form_multipart([field1: {enum, size: 10}], boundary: "foo")

      # Streamed parts make the whole body lazy; it must still report the
      # correct total size up front.
      body = body |> Enum.to_list() |> IO.iodata_to_binary()

      assert size == byte_size(body)
    end

    # NOTE(review): the name mentions :content_length but the option passed
    # is :size — presumably the option was renamed; consider updating the
    # test name to match.
    test "blindly trust :content_length option" do
      enum = Stream.cycle(["a"]) |> Stream.take(10)
      advertised_length = 50

      %{body: body, size: size} =
        Req.Utils.encode_form_multipart([field1: {enum, size: advertised_length}],
          boundary: "foo"
        )

      body = body |> Enum.to_list() |> IO.iodata_to_binary()

      # The reported size uses the advertised length as-is, even though the
      # enumerable actually yields fewer bytes.
      assert size ==
               byte_size(body) + advertised_length - IO.iodata_length(enum |> Enum.to_list())
    end

    test "can return nil size" do
      enum = Stream.cycle(["a"]) |> Stream.take(10)

      # Without a :size hint for a streamed part, the total size is unknown.
      %{size: size} =
        Req.Utils.encode_form_multipart([field1: {enum, []}],
          boundary: "foo"
        )

      assert size == nil
    end

    @tag :tmp_dir
    test "can return stream", %{tmp_dir: tmp_dir} do
      File.write!("#{tmp_dir}/2.txt", "22")

      %{body: body, size: size} =
        Req.Utils.encode_form_multipart(
          [
            field1: 1,
            field2: File.stream!("#{tmp_dir}/2.txt")
          ],
          boundary: "foo"
        )

      # A File.stream! part turns the body into a stream (function); the
      # filename and content-type are derived from the file path.
      assert is_function(body)
      body = body |> Enum.to_list() |> IO.iodata_to_binary()
      assert size == byte_size(body)

      assert body == """
             --foo\r\n\
             content-disposition: form-data; name=\"field1\"\r\n\
             \r\n\
             1\r\n\
             --foo\r\n\
             content-disposition: form-data; name=\"field2\"; filename=\"2.txt\"\r\n\
             content-type: text/plain\r\n\
             \r\n\
             22\r\n\
             --foo--\r\n\
             """

      # Field order is preserved regardless of where the streamed part sits.
      %{body: body, size: size} =
        Req.Utils.encode_form_multipart(
          [
            field2: File.stream!("#{tmp_dir}/2.txt"),
            field1: 1
          ],
          boundary: "foo"
        )

      assert is_function(body)
      body = body |> Enum.to_list() |> IO.iodata_to_binary()
      assert size == byte_size(body)

      assert body == """
             --foo\r\n\
             content-disposition: form-data; name=\"field2\"; filename=\"2.txt\"\r\n\
             content-type: text/plain\r\n\
             \r\n\
             22\r\n\
             --foo\r\n\
             content-disposition: form-data; name=\"field1\"\r\n\
             \r\n\
             1\r\n\
             --foo--\r\n\
             """
    end
  end
end
347 |
--------------------------------------------------------------------------------
/test/req_test.exs:
--------------------------------------------------------------------------------
defmodule ReqTest do
  use ExUnit.Case, async: true
  import TestHelper, only: [start_http_server: 1]

  doctest Req,
    only: [
      new: 1,
      merge: 2,
      get_headers_list: 1
    ]

  # Each test gets a fresh Bypass server and its base URL.
  setup do
    bypass = Bypass.open()
    [bypass: bypass, url: "http://localhost:#{bypass.port}"]
  end

  test "default_headers", c do
    # Echo the received user-agent so we can assert Req set its default.
    Bypass.expect(c.bypass, "GET", "/", fn conn ->
      [user_agent] = Plug.Conn.get_req_header(conn, "user-agent")
      Plug.Conn.send_resp(conn, 200, user_agent)
    end)

    assert "req/" <> _ = Req.get!(c.url).body
  end

  test "headers", c do
    pid = self()

    # The server reports back every x-* request header it received.
    Bypass.expect(c.bypass, "GET", "/", fn conn ->
      headers = Enum.filter(conn.req_headers, fn {name, _} -> String.starts_with?(name, "x-") end)
      send(pid, {:headers, headers})
      Plug.Conn.send_resp(conn, 200, "ok")
    end)

    # Atom keys are dashified; non-string values are encoded (a DateTime
    # becomes an HTTP date).
    Req.get!(c.url, headers: [x_a: 1, x_b: ~U[2021-01-01 09:00:00Z]])
    assert_receive {:headers, headers}
    assert headers == [{"x-a", "1"}, {"x-b", "Fri, 01 Jan 2021 09:00:00 GMT"}]

    # Duplicate keys are kept as a multi-value header and sent comma-joined.
    req = Req.new(headers: [x_a: 1, x_a: 2])

    unless Req.MixProject.legacy_headers_as_lists?() do
      assert req.headers == %{"x-a" => ["1", "2"]}
    end

    Req.get!(req, url: c.url)
    assert_receive {:headers, headers}
    assert headers == [{"x-a", "1, 2"}]

    # Per-request headers override same-named request-struct headers but
    # leave the others intact.
    req = Req.new(headers: [x_a: 1, x_b: 1])
    Req.get!(req, url: c.url, headers: [x_a: 2])
    assert_receive {:headers, headers}
    assert headers == [{"x-a", "2"}, {"x-b", "1"}]
  end

  test "redact" do
    # Credentials must never appear in the inspect output.
    assert inspect(Req.new(auth: {:bearer, "foo"})) =~ ~s|auth: {:bearer, "***"}|

    assert inspect(Req.new(auth: {:basic, "foo:bar"})) =~ ~s|auth: {:basic, "foo****"}|

    assert inspect(Req.new(auth: fn -> {:basic, "foo:bar"} end)) =~ ~s|auth: #Function|

    if Req.MixProject.legacy_headers_as_lists?() do
      assert inspect(Req.new(headers: [authorization: "bearer foobar"])) =~
               ~s|{"authorization", "bearer ***"}|
    else
      assert inspect(Req.new(headers: [authorization: "bearer foo"])) =~
               ~s|"authorization" => ["bearer ***"]|
    end
  end

  test "plugins" do
    # A plugin is any req -> req function; options it registers become
    # usable in the same Req.new call.
    foo = fn req ->
      Req.Request.register_options(req, [:foo])
    end

    req = Req.new(plugins: [foo], foo: 42)
    assert req.options.foo == 42
  end

  test "async enumerable" do
    # A chunked response consumed with `into: :self` yields a body that can
    # be fed directly as the request body of another request.
    %{url: origin_url} =
      start_http_server(fn conn ->
        conn = Plug.Conn.send_chunked(conn, 200)
        {:ok, conn} = Plug.Conn.chunk(conn, "foo")
        {:ok, conn} = Plug.Conn.chunk(conn, "bar")
        {:ok, conn} = Plug.Conn.chunk(conn, "baz")
        conn
      end)

    %{url: echo_url} =
      start_http_server(fn conn ->
        {:ok, body, conn} = Plug.Conn.read_body(conn)
        Plug.Conn.send_resp(conn, 200, body)
      end)

    resp = Req.get!(origin_url, into: :self)
    assert Req.put!(echo_url, body: resp.body).body == "foobarbaz"
  end
end
100 |
--------------------------------------------------------------------------------
/test/test_helper.exs:
--------------------------------------------------------------------------------
defmodule TestHelper do
  # Boots a Bandit HTTP server on a random free port running the given plug
  # function. Returns the server pid and its base URL. The server is
  # supervised by ExUnit and shut down at the end of the test.
  def start_http_server(plug) do
    options = [
      scheme: :http,
      port: 0,
      plug: fn conn, _ -> plug.(conn) end,
      startup_log: false,
      # Compression is disabled so tests can assert on raw response bodies.
      http_options: [compress: false]
    ]

    pid = ExUnit.Callbacks.start_supervised!({Bandit, options})
    {:ok, {_ip, port}} = ThousandIsland.listener_info(pid)
    %{pid: pid, url: URI.new!("http://localhost:#{port}")}
  end

  # Starts a bare TCP acceptor on a random free port; `fun` is invoked with
  # each accepted socket. Useful for exercising low-level/malformed HTTP
  # traffic. The acceptor task is supervised by ExUnit.
  def start_tcp_server(fun) do
    {:ok, listen_socket} = :gen_tcp.listen(0, mode: :binary, active: false)
    {:ok, port} = :inet.port(listen_socket)
    pid = ExUnit.Callbacks.start_supervised!({Task, fn -> accept(listen_socket, fun) end})
    %{pid: pid, url: URI.new!("http://localhost:#{port}")}
  end

  # Accept loop: serve one connection at a time until the listen socket is
  # closed. Recursing only on the success branch fixes a bug in the previous
  # version, which recursed unconditionally after the case expression and
  # therefore busy-looped on {:error, :closed} once the listener went away.
  defp accept(listen_socket, fun) do
    case :gen_tcp.accept(listen_socket) do
      {:ok, socket} ->
        fun.(socket)
        :ok = :gen_tcp.close(socket)
        accept(listen_socket, fun)

      {:error, :closed} ->
        :ok
    end
  end
end
36 |
defmodule EzstdFilter do
  # Logger primary filter that drops ezstd's NIF-loading debug line, e.g.:
  #   17:56:39.116 [debug] Loading library: ~c"/path/to/req/_build/test/lib/ezstd/priv/ezstd_nif"
  # Every other log event passes through untouched (:ignore).
  def filter(log_event, _opts), do: do_filter(log_event.msg)

  defp do_filter({"Loading library" <> _, [path]}) do
    # Assert via pinned match that the message really refers to ezstd's
    # priv dir before silently dropping it.
    expected_path = to_charlist(Application.app_dir(:ezstd, "priv/ezstd_nif"))
    ^expected_path = path
    :stop
  end

  defp do_filter(_msg), do: :ignore
end
51 |
# Install the primary log filter so ezstd's NIF-loading debug line does not
# pollute test output (see EzstdFilter above).
:logger.add_primary_filter(:ezstd_filter, {&EzstdFilter.filter/2, []})

# Integration tests are opt-in: run them with `mix test --include integration`.
ExUnit.configure(exclude: :integration)
ExUnit.start()
56 |
--------------------------------------------------------------------------------