├── .cargo
│   └── config.toml
├── .github
│   └── workflows
│       ├── ci.yml
│       └── netsim.yml
├── .gitignore
├── Cargo.lock
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
    ├── blobs.rs
    ├── get.rs
    ├── lib.rs
    ├── main.rs
    ├── progress.rs
    ├── protocol.rs
    ├── provider.rs
    ├── tls.rs
    ├── tls
    │   ├── certificate.rs
    │   └── verifier.rs
    └── util.rs
/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = ["-Wmissing_debug_implementations"] 3 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | 9 | env: 10 | RUST_BACKTRACE: 1 11 | RUSTFLAGS: -Dwarnings 12 | MSRV: "1.63" 13 | 14 | 15 | jobs: 16 | build_and_test_nix: 17 | name: Build and test (Nix) 18 | runs-on: ${{ matrix.os }} 19 | strategy: 20 | matrix: 21 | os: [ubuntu-latest, macOS-latest] 22 | rust: [nightly, stable] 23 | 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@master 27 | with: 28 | submodules: recursive 29 | 30 | - name: Install ${{ matrix.rust }} 31 | uses: actions-rs/toolchain@v1 32 | with: 33 | toolchain: ${{ matrix.rust }} 34 | override: true 35 | 36 | - name: check 37 | uses: actions-rs/cargo@v1 38 | with: 39 | command: check 40 | args: --all --bins --tests 41 | 42 | - name: tests 43 | uses: actions-rs/cargo@v1 44 | with: 45 | command: test 46 | args: --all 47 | 48 | build_and_test_windows: 49 | name: Build and test (Windows) 50 | runs-on: ${{ matrix.os }} 51 | strategy: 52 | matrix: 53 | os: [windows-latest] 54 | rust: [nightly, stable] 55 | target: 56 | - x86_64-pc-windows-gnu 57 | - x86_64-pc-windows-msvc 58 | 59 | steps: 60 | - name: Checkout 61 | uses: actions/checkout@master 62 | with: 63 | submodules: recursive 64 | 65 | - name: Install ${{ matrix.rust }} 66 | uses: actions-rs/toolchain@v1 67 | with: 68 | toolchain: ${{ matrix.rust }} 69 | target: ${{ matrix.target }} 70 | override: true 71 | 72 | - uses: msys2/setup-msys2@v2 73 | - name: check 74 | uses: actions-rs/cargo@v1 75 | with: 76 | command: check 77 | args: --all --bins --tests --target ${{ matrix.target }} 78 | 79 | - name: check bench 80 | uses: actions-rs/cargo@v1 81 | if: matrix.rust == 'nightly' 82 | with: 83 | command: check 84 | args: --benches --target ${{ matrix.target }} 86 | 87 | - name: tests 88 | uses: actions-rs/cargo@v1 89 | with: 90 | command: test 91 | args: --all --target ${{ matrix.target }} 92 | 93 | cross: 94 | name: Cross compile 95 | runs-on: ubuntu-latest 96 | strategy: 97 | matrix: 98 | target: 99 | - i686-unknown-linux-gnu 100 | - arm-linux-androideabi 101 | 102 | steps: 103 | - name: Checkout 104 | uses: actions/checkout@master 105 | with: 106 | submodules: recursive 107 | 108 | - name: Install nightly 109 | uses: actions-rs/toolchain@v1 110 | with: 111 | toolchain: nightly 112 | override: true 113 | 114 | - name: Install cross 115 | run: cargo install cross 116 | 117 | - name: check 118 | run: cross check --all --target ${{ matrix.target }} 119 | 120 | - name: test 121 | run: cross test --all --target ${{ matrix.target }} 122 | 123 | check_fmt_and_docs: 124 | name: Checking fmt and docs 125 | runs-on: ubuntu-latest 126 | steps: 127 | - uses: actions/checkout@master 128 | 129 | - uses: actions-rs/toolchain@v1 130 | with: 131 | profile: minimal 132 | toolchain: nightly 133 | override: true 134 | components: rustfmt 135 | 136 | - 
name: setup 137 | run: | 138 | rustup component add rustfmt 139 | rustc --version 140 | 141 | - name: fmt 142 | run: cargo fmt --all -- --check 143 | 144 | - name: Docs 145 | run: cargo doc 146 | 147 | clippy_check: 148 | runs-on: ubuntu-latest 149 | steps: 150 | - uses: actions/checkout@master 151 | - uses: actions-rs/toolchain@v1 152 | with: 153 | profile: minimal 154 | toolchain: stable 155 | override: true 156 | components: clippy 157 | - name: clippy check 158 | run: cargo clippy --message-format=json --all-features --all-targets 159 | 160 | msrv: 161 | name: Minimal Supported Rust Version 162 | runs-on: ubuntu-latest 163 | steps: 164 | - uses: actions/checkout@master 165 | - uses: actions-rs/toolchain@v1 166 | with: 167 | profile: minimal 168 | toolchain: "${{ env.MSRV }}" 169 | override: true 170 | - name: Check MSRV all features 171 | run: | 172 | cargo +$MSRV check --workspace --all-targets --no-default-features 173 | -------------------------------------------------------------------------------- /.github/workflows/netsim.yml: -------------------------------------------------------------------------------- 1 | name: netsim-CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | issue_comment: 8 | types: [created, edited, deleted] 9 | 10 | env: 11 | RUST_BACKTRACE: 1 12 | RUSTFLAGS: -Dwarnings 13 | MSRV: "1.63" 14 | 15 | jobs: 16 | netsim: 17 | name: Run network simulations/benchmarks 18 | if: >- 19 | (github.event_name == 'issue_comment' && 20 | github.event.issue.pull_request && 21 | github.event.comment.body == '/netsim') || github.event_name != 'issue_comment' 22 | runs-on: [self-hosted, linux, X64] 23 | permissions: 24 | issues: write 25 | pull-requests: write 26 | steps: 27 | - name: Checkout 28 | uses: actions/checkout@master 29 | with: 30 | submodules: recursive 31 | 32 | - name: Install rust stable 33 | uses: actions-rs/toolchain@v1 34 | with: 35 | toolchain: stable 36 | override: true 37 | 38 | - name: Build sendme 39 | run: | 40 | cargo build --release 41 | 42 | - name: Fetch and build chuck 43 | run: | 44 | git clone https://github.com/n0-computer/chuck.git 45 | cd chuck 46 | cargo build --release 47 | 48 | - name: Install netsim deps 49 | run: | 50 | cd chuck/netsim 51 | sudo apt update 52 | ./setup.sh 53 | 54 | - name: Copy binaries to right location 55 | run: | 56 | cp target/release/sendme chuck/netsim/bins/sendme 57 | cp chuck/target/release/chuck chuck/netsim/bins/chuck 58 | 59 | - name: Run tests 60 | run: | 61 | cd chuck/netsim 62 | sudo kill -9 $(pgrep ovs) 63 | sudo mn --clean 64 | sudo python3 main.py sims/standard 65 | 66 | - name: Generate report 67 | id: generate_report 68 | run: | 69 | cd chuck/netsim 70 | python3 reports_csv.py > report.txt 71 | export NETSIM_REPORT=$(cat report.txt) 72 | echo "NETSIM_REPORT<<EOFMARKER" >> ${GITHUB_OUTPUT} 73 | echo "${NETSIM_REPORT}" >> ${GITHUB_OUTPUT} 74 | echo "EOFMARKER" >> ${GITHUB_OUTPUT} 75 | 76 | - name: Setup Environment (PR) 77 | if: ${{ github.event_name == 'pull_request' }} 78 | shell: bash 79 | run: | 80 | echo "LAST_COMMIT_SHA=$(git rev-parse --short ${{ github.event.pull_request.head.sha }})" >> ${GITHUB_ENV} 81 | - name: Setup Environment (Push) 82 | if: ${{ github.event_name == 'push' }} 83 | shell: bash 84 | run: | 85 | echo "LAST_COMMIT_SHA=$(git rev-parse --short ${GITHUB_SHA})" >> ${GITHUB_ENV} 86 | 87 | - name: Respond Issue 88 | uses: peter-evans/create-or-update-comment@v2 89 | if: github.event_name == 'issue_comment' 90 | with: 91 | issue-number: ${{ github.event.issue.number }} 92 | body: | 94 | 
`${{ github.head_ref }}.${{ env.LAST_COMMIT_SHA }}` 95 | <details>
96 | <summary>Perf report</summary> 97 | 98 | ```json 99 | 100 | ${{ steps.generate_report.outputs.NETSIM_REPORT }} 101 | 102 | ``` 103 | </details>
104 | 105 | - name: Respond PR 106 | uses: peter-evans/create-or-update-comment@v2 107 | if: github.event.pull_request 108 | with: 109 | issue-number: ${{ github.event.pull_request.number }} 110 | body: | 111 | `${{ github.head_ref }}.${{ env.LAST_COMMIT_SHA }}` 112 | <details>
113 | <summary>Perf report</summary> 114 | 115 | ```json 116 | 117 | ${{ steps.generate_report.outputs.NETSIM_REPORT }} 118 | 119 | ``` 120 | </details>
121 | 122 | - name: Dump report 123 | run: | 124 | export AWS_ACCESS_KEY_ID=${{secrets.S3_ACCESS_KEY_ID}} 125 | export AWS_SECRET_ACCESS_KEY=${{secrets.S3_ACCESS_KEY}} 126 | export AWS_DEFAULT_REGION=us-west-2 127 | 128 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 129 | unzip -q awscliv2.zip 130 | sudo ./aws/install --update 131 | 132 | cd chuck/netsim 133 | python3 reports_csv.py --prom --commit ${{ env.LAST_COMMIT_SHA }} > report_prom.txt 134 | 135 | tar cvzf report.tar.gz report_prom.txt report.txt logs/ report/ 136 | aws s3 cp ./report.tar.gz s3://${{secrets.S3_REPORT_BUCKET}}/${{ env.LAST_COMMIT_SHA }}.tar.gz --no-progress 137 | 138 | instance=$(echo "${{ github.head_ref }}" | tr -c '[:alnum:]' '_') 139 | d=$(cat report_prom.txt) 140 | prom_data=$(printf "%s\n " "$d") 141 | curl -X POST -H "Content-Type: text/plain" --data "$prom_data" ${{secrets.PROM_ENDPOINT}}/metrics/job/netsim/instance/${instance} 142 | 143 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sendme" 3 | version = "0.1.0" 4 | edition = "2021" 5 | readme = "README.md" 6 | license = "MIT/Apache-2.0" 7 | authors = ["dignifiedquire "] 8 | repository = "https://github.com/n0-computer/sendme" 9 | 10 | # Sadly this also needs to be updated in .github/workflows/ci.yml 11 | rust-version = "1.63" 12 | 13 | [dependencies] 14 | abao = { version = "0.1.2" } 15 | anyhow = { version = "1", features = ["backtrace"] } 16 | base64 = "0.21.0" 17 | blake3 = "1.3.3" 18 | bytes = "1" 19 | clap = { version = "4", features = ["derive"], optional = true } 20 | console = { version = "0.15.5", optional = true } 21 | data-encoding = { version = "2.3.3", optional = true } 22 | der = { version = "0.6", features = ["alloc", "derive"] } 23 | ed25519-dalek = { version = "1.0.1", features = ["serde"] } 24 | futures = "0.3.25" 25 | indicatif = { version = "0.17", features = ["tokio"], optional = true } 26 | multibase = { version = "0.9.1", optional = true } 27 | portable-atomic = "1" 28 | postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } 29 | quinn = "0.9.3" 30 | rand = "0.7" 31 | rcgen = "0.10" 32 | ring = "0.16.20" 33 | rustls = { version = "0.20.8", default-features = false, features = ["dangerous_configuration"] } 34 | serde = { version = "1", features = ["derive"] } 35 | ssh-key = { version = "0.5.1", features = ["ed25519", "std", "rand_core"] } 36 | tempfile = "3" 37 | thiserror = "1" 38 | tokio = { version = "1", features = ["full"] } 39 | tokio-util = { version = "0.7", features = ["io-util", "io"] } 40 | tracing = "0.1" 41 | tracing-futures = "0.2.5" 42 | tracing-subscriber = { version = "0.3", features = ["env-filter"] } 43 | webpki = "0.22" 44 | x509-parser = "0.14" 45 | zeroize = "1.5" 46 | 47 | [dev-dependencies] 48 | hex = "0.4.3" 49 | proptest = "1.0.0" 50 | rand = "0.7" 51 | testdir = "0.7.1" 52 | 53 | [features] 54 | default = ["cli"] 55 | cli = ["clap", "console", "indicatif", "data-encoding", "multibase"] 56 | 57 | [[bin]] 58 | name = "sendme" 59 | required-features = ["cli"] 60 | -------------------------------------------------------------------------------- /LICENSE-APACHE: 
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

# sendme 2 | 3 | Send data over the internet. 4 | 5 | [Crates.io](https://crates.io/crates/sendme) | [API Docs](https://docs.rs/sendme) | [Releases](https://github.com/n0-computer/sendme/releases) 6 | 
40 | 41 | ## Usage 42 | 43 | ### CLI 44 | Sending data: 45 | ```sh 46 | $ ./sendme provide <path> 47 | ``` 48 | 49 | Receiving data: 50 | ```sh 51 | $ ./sendme get <hash> 52 | ``` 53 | 54 | ### As a library 55 | Disable default features when using `sendme` as a library: 56 | `sendme = { version = "...", default-features = false }` 57 | 58 | This removes dependencies that are only relevant for the `sendme` 59 | command-line binary, not for the library. 60 | 61 | # License 62 | 63 | This project is licensed under either of 64 | 65 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 66 | http://www.apache.org/licenses/LICENSE-2.0) 67 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 68 | http://opensource.org/licenses/MIT) 69 | 70 | at your option. 71 | 72 | ### Contribution 73 | 74 | Unless you explicitly state otherwise, any contribution intentionally submitted 75 | for inclusion in this project by you, as defined in the Apache-2.0 license, 76 | shall be dual licensed as above, without any additional terms or conditions. 77 | -------------------------------------------------------------------------------- /src/blobs.rs: -------------------------------------------------------------------------------- 1 | //! Types for blobs and collections of blobs 2 | use anyhow::{Context, Result}; 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::util::Hash; 6 | 7 | /// A collection of blobs 8 | #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] 9 | pub struct Collection { 10 | /// The name of this collection 12 | pub(crate) name: String, 13 | /// Links to the blobs in this collection 14 | pub(crate) blobs: Vec<Blob>, 15 | /// The total size of the raw data referred to by all links 16 | pub(crate) total_blobs_size: u64, 17 | } 18 | 19 | impl Collection { 20 | /// Deserialize a collection from a byte slice 21 | pub fn from_bytes(data: &[u8]) -> Result<Self> { 22 | let c: Collection = 23 | postcard::from_bytes(data).context("failed to deserialize Collection data")?; 24 | Ok(c) 25 | } 26 | 27 | /// Total size of the raw data referred to by all blobs in this collection 28 | pub fn total_blobs_size(&self) -> u64 { 29 | self.total_blobs_size 30 | } 31 | 32 | /// The name of this collection 33 | pub fn name(&self) -> &str { 34 | &self.name 35 | } 36 | 37 | /// The number of blobs in this collection 38 | pub fn total_entries(&self) -> u64 { 39 | self.blobs.len() as u64 40 | } 41 | } 42 | 43 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 44 | pub(crate) struct Blob { 45 | /// The name of this blob of data 46 | pub(crate) name: String, 47 | /// The hash of the blob of data 48 | pub(crate) hash: Hash, 49 | } 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | use super::*; 54 | 55 | #[test] 56 | fn roundtrip_blob() { 57 | let b = Blob { 58 | name: "test".to_string(), 59 | hash: abao::Hash::from_hex( 60 | "3aa61c409fd7717c9d9c639202af2fae470c0ef669be7ba2caea5779cb534e9d", 61 | ) 62 | .unwrap() 63 | .into(), 64 | }; 65 | 66 | let mut buf = bytes::BytesMut::zeroed(1024); 67 | postcard::to_slice(&b, &mut buf).unwrap(); 68 | let deserialize_b: Blob = postcard::from_bytes(&buf).unwrap(); 69 | assert_eq!(b, deserialize_b); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/get.rs: -------------------------------------------------------------------------------- 1 | //! The client side API 2 | //! 3 | //! The main entry point is [`run`]. This function takes callbacks that will 4 | //! be invoked when blobs or collections are received. It is up to the caller 5 | //!
to store the received data. 6 | use std::fmt::Debug; 7 | use std::io; 8 | use std::net::SocketAddr; 9 | use std::sync::Arc; 10 | use std::time::{Duration, Instant}; 11 | 12 | use crate::blobs::Collection; 13 | use crate::protocol::{ 14 | read_bao_encoded, read_lp_data, write_lp, AuthToken, Handshake, Request, Res, Response, 15 | }; 16 | use crate::tls::{self, Keypair, PeerId}; 17 | use abao::decode::AsyncSliceDecoder; 18 | use anyhow::{anyhow, bail, ensure, Result}; 19 | use bytes::BytesMut; 20 | use futures::Future; 21 | use postcard::experimental::max_size::MaxSize; 22 | use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf}; 23 | use tracing::{debug, error}; 24 | 25 | pub use crate::util::Hash; 26 | 27 | const MAX_DATA_SIZE: u64 = 1024 * 1024 * 1024; 28 | 29 | /// Options for the client 30 | #[derive(Clone, Debug)] 31 | pub struct Options { 32 | /// The address to connect to 33 | pub addr: SocketAddr, 34 | /// The peer id to expect 35 | pub peer_id: Option<PeerId>, 36 | /// Whether to log the SSL keys when the `SSLKEYLOGFILE` environment variable is set. 37 | pub keylog: bool, 38 | } 39 | 40 | impl Default for Options { 41 | fn default() -> Self { 42 | Options { 43 | addr: "127.0.0.1:4433".parse().unwrap(), 44 | peer_id: None, 45 | keylog: false, 46 | } 47 | } 48 | } 49 | 50 | /// Set up a QUIC connection to the provided address. 51 | async fn setup(opts: Options) -> Result<quinn::Connection> { 52 | let keypair = Keypair::generate(); 53 | 54 | let tls_client_config = tls::make_client_config(&keypair, opts.peer_id, opts.keylog)?; 55 | let mut client_config = quinn::ClientConfig::new(Arc::new(tls_client_config)); 56 | let mut endpoint = quinn::Endpoint::client("0.0.0.0:0".parse().unwrap())?; 57 | let mut transport_config = quinn::TransportConfig::default(); 58 | transport_config.keep_alive_interval(Some(Duration::from_secs(1))); 59 | client_config.transport_config(Arc::new(transport_config)); 60 | 61 | endpoint.set_default_client_config(client_config); 62 | 63 | debug!("connecting to {}", opts.addr); 64 | let connect = endpoint.connect(opts.addr, "localhost")?; 65 | let connection = connect.await?; 66 | 67 | Ok(connection) 68 | } 69 | 70 | /// Stats about the transfer. 71 | #[derive(Debug, Clone, PartialEq)] 72 | pub struct Stats { 73 | /// The number of bytes transferred 74 | pub data_len: u64, 75 | /// The time it took to transfer the data 76 | pub elapsed: Duration, 77 | } 78 | 79 | impl Stats { 80 | /// Transfer rate in megabits per second 81 | pub fn mbits(&self) -> f64 { 82 | let data_len_bit = self.data_len * 8; 83 | data_len_bit as f64 / (1000. * 1000.)
/ self.elapsed.as_secs_f64() 84 | } 85 | } 86 | 87 | /// A verified stream of data coming from the provider 88 | /// 89 | /// We guarantee that the data is correct by incrementally verifying a hash 90 | #[repr(transparent)] 91 | #[derive(Debug)] 92 | pub struct DataStream(AsyncSliceDecoder<quinn::RecvStream>); 93 | 94 | impl DataStream { 95 | fn new(inner: quinn::RecvStream, hash: Hash) -> Self { 96 | DataStream(AsyncSliceDecoder::new(inner, &hash.into(), 0, u64::MAX)) 97 | } 98 | 99 | async fn read_size(&mut self) -> io::Result<u64> { 100 | self.0.read_size().await 101 | } 102 | 103 | fn into_inner(self) -> quinn::RecvStream { 104 | self.0.into_inner() 105 | } 106 | } 107 | 108 | impl AsyncRead for DataStream { 109 | fn poll_read( 110 | mut self: std::pin::Pin<&mut Self>, 111 | cx: &mut std::task::Context<'_>, 112 | buf: &mut ReadBuf<'_>, 113 | ) -> std::task::Poll<io::Result<()>> { 114 | std::pin::Pin::new(&mut self.0).poll_read(cx, buf) 115 | } 116 | } 117 | 118 | /// Get a collection and all its blobs from a provider 119 | pub async fn run<A, B, C, FutA, FutB, FutC>( 120 | hash: Hash, 121 | auth_token: AuthToken, 122 | opts: Options, 123 | on_connected: A, 124 | on_collection: B, 125 | mut on_blob: C, 126 | ) -> Result<Stats> 127 | where 128 | A: FnOnce() -> FutA, 129 | FutA: Future<Output = Result<()>>, 130 | B: FnOnce(&Collection) -> FutB, 131 | FutB: Future<Output = Result<()>>, 132 | C: FnMut(Hash, DataStream, String) -> FutC, 133 | FutC: Future<Output = Result<DataStream>>, 134 | { 135 | let now = Instant::now(); 136 | let connection = setup(opts).await?; 137 | 138 | let (mut writer, mut reader) = connection.open_bi().await?; 139 | 140 | on_connected().await?; 141 | 142 | let mut out_buffer = BytesMut::zeroed(std::cmp::max( 143 | Request::POSTCARD_MAX_SIZE, 144 | Handshake::POSTCARD_MAX_SIZE, 145 | )); 146 | 147 | // 1. Send Handshake 148 | { 149 | debug!("sending handshake"); 150 | let handshake = Handshake::new(auth_token); 151 | let used = postcard::to_slice(&handshake, &mut out_buffer)?; 152 | write_lp(&mut writer, used).await?; 153 | } 154 | 155 | // 2. Send Request 156 | { 157 | debug!("sending request"); 158 | let req = Request { id: 1, name: hash }; 159 | 160 | let used = postcard::to_slice(&req, &mut out_buffer)?; 161 | write_lp(&mut writer, used).await?; 162 | } 163 | writer.finish().await?; 164 | drop(writer); 165 | 166 | // 3. Read response 167 | { 168 | debug!("reading response"); 169 | let mut in_buffer = BytesMut::with_capacity(1024); 170 | 171 | // track total amount of blob data transferred 172 | let mut data_len = 0; 173 | // read next message 174 | match read_lp_data(&mut reader, &mut in_buffer).await?
{ 175 | Some(response_buffer) => { 176 | let response: Response = postcard::from_bytes(&response_buffer)?; 177 | match response.data { 178 | // server is sending over a collection of blobs 179 | Res::FoundCollection { total_blobs_size } => { 180 | ensure!( 181 | total_blobs_size <= MAX_DATA_SIZE, 182 | "size too large: {} > {}", 183 | total_blobs_size, 184 | MAX_DATA_SIZE 185 | ); 186 | 187 | data_len = total_blobs_size; 188 | 189 | // read entire collection data into buffer 190 | let data = read_bao_encoded(&mut reader, hash).await?; 191 | 192 | // decode the collection 193 | let collection = Collection::from_bytes(&data)?; 194 | on_collection(&collection).await?; 195 | 196 | // expect to get blob data in the order they appear in the collection 197 | let mut remaining_size = total_blobs_size; 198 | for blob in collection.blobs { 199 | let mut blob_reader = 200 | handle_blob_response(blob.hash, reader, &mut in_buffer).await?; 201 | 202 | let size = blob_reader.read_size().await?; 203 | anyhow::ensure!( 204 | size <= MAX_DATA_SIZE, 205 | "size too large: {size} > {MAX_DATA_SIZE}" 206 | ); 207 | anyhow::ensure!( 208 | size <= remaining_size, 209 | "downloaded more than {total_blobs_size}" 210 | ); 211 | remaining_size -= size; 212 | let mut blob_reader = 213 | on_blob(blob.hash, blob_reader, blob.name).await?; 214 | 215 | if blob_reader.read_exact(&mut [0u8; 1]).await.is_ok() { 216 | bail!("`on_blob` callback did not fully read the blob content") 217 | } 218 | reader = blob_reader.into_inner(); 219 | } 220 | } 221 | 222 | // unexpected message 223 | Res::Found { .. } => { 224 | // we should only receive `Res::FoundCollection` or `Res::NotFound` from the 225 | // provider at this point in the exchange 226 | bail!("Unexpected message from provider. Ending transfer early."); 227 | } 228 | 229 | // data associated with the hash is not found 230 | Res::NotFound => { 231 | Err(anyhow!("data not found"))?; 232 | } 233 | } 234 | 235 | // Shut down the stream 236 | if let Some(chunk) = reader.read_chunk(8, false).await? { 237 | reader.stop(0u8.into()).ok(); 238 | error!("Received unexpected data from the provider: {chunk:?}"); 239 | } 240 | drop(reader); 241 | 242 | let elapsed = now.elapsed(); 243 | 244 | let stats = Stats { data_len, elapsed }; 245 | 246 | Ok(stats) 247 | } 248 | None => { 249 | bail!("provider closed stream"); 250 | } 251 | } 252 | } 253 | } 254 | 255 | /// Read the next response, and if it is `Res::Found`, prepare to read the next blob of data off the reader. 256 | /// 257 | /// Returns a [`DataStream`] that can be used to read the content. 259 | async fn handle_blob_response( 260 | hash: Hash, 261 | mut reader: quinn::RecvStream, 262 | buffer: &mut BytesMut, 263 | ) -> Result<DataStream> { 264 | match read_lp_data(&mut reader, buffer).await? { 265 | Some(response_buffer) => { 266 | let response: Response = postcard::from_bytes(&response_buffer)?; 267 | match response.data { 268 | // unexpected message 269 | Res::FoundCollection { .. } => Err(anyhow!( 270 | "Unexpected message from provider. Ending transfer early."
271 | ))?, 272 | // blob data not found 273 | Res::NotFound => Err(anyhow!("data for {} not found", hash))?, 274 | // next blob in collection will be sent over 275 | Res::Found => { 276 | assert!(buffer.is_empty()); 277 | let decoder = DataStream::new(reader, hash); 278 | Ok(decoder) 279 | } 280 | } 281 | } 282 | None => Err(anyhow!("server disconnected"))?, 283 | } 284 | } 285 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Send data over the internet. 2 | #![deny(missing_docs)] 3 | #![deny(rustdoc::broken_intra_doc_links)] 4 | pub mod blobs; 5 | pub mod get; 6 | pub mod progress; 7 | pub mod protocol; 8 | pub mod provider; 9 | 10 | mod tls; 11 | mod util; 12 | 13 | pub use tls::{Keypair, PeerId, PeerIdError, PublicKey, SecretKey, Signature}; 14 | pub use util::Hash; 15 | 16 | #[cfg(test)] 17 | mod tests { 18 | use std::{ 19 | net::SocketAddr, 20 | path::PathBuf, 21 | sync::{atomic::AtomicUsize, Arc}, 22 | time::Duration, 23 | }; 24 | 25 | use anyhow::{anyhow, Context, Result}; 26 | use rand::RngCore; 27 | use testdir::testdir; 28 | use tokio::fs; 29 | use tokio::io::{self, AsyncReadExt, AsyncWriteExt}; 30 | use tracing_subscriber::{prelude::*, EnvFilter}; 31 | 32 | use crate::protocol::AuthToken; 33 | use crate::provider::{create_collection, Event, Provider}; 34 | use crate::tls::PeerId; 35 | use crate::util::Hash; 36 | 37 | use super::*; 38 | 39 | #[tokio::test] 40 | async fn basics() -> Result<()> { 41 | transfer_data(vec![("hello_world", "hello world!".as_bytes().to_vec())]).await 42 | } 43 | 44 | #[tokio::test] 45 | async fn multi_file() -> Result<()> { 46 | let file_opts = vec![ 47 | ("1", 10), 48 | ("2", 1024), 49 | ("3", 1024 * 1024), 50 | // overkill, but it works! 
Just annoying to wait for 51 | // ("4", 1024 * 1024 * 90), 52 | ]; 53 | transfer_random_data(file_opts).await 54 | } 55 | 56 | #[tokio::test] 57 | async fn sizes() -> Result<()> { 58 | let sizes = [ 59 | 0, 60 | 10, 61 | 100, 62 | 1024, 63 | 1024 * 100, 64 | 1024 * 500, 65 | 1024 * 1024, 66 | 1024 * 1024 + 10, 67 | ]; 68 | 69 | for size in sizes { 70 | transfer_random_data(vec![("hello_world", size)]).await?; 71 | } 72 | 73 | Ok(()) 74 | } 75 | 76 | #[tokio::test] 77 | async fn empty_files() -> Result<()> { 78 | // try to transfer as many files as possible without hitting a limit 79 | // booo 400 is too small :( 80 | let num_files = 400; 81 | let mut file_opts = Vec::new(); 82 | for i in 0..num_files { 83 | file_opts.push((i.to_string(), 0)); 84 | } 85 | transfer_random_data(file_opts).await 86 | } 87 | 88 | #[tokio::test(flavor = "multi_thread")] 89 | async fn multiple_clients() -> Result<()> { 90 | let dir: PathBuf = testdir!(); 91 | let filename = "hello_world"; 92 | let path = dir.join(filename); 93 | let content = b"hello world!"; 94 | let addr = "127.0.0.1:0".parse().unwrap(); 95 | 96 | tokio::fs::write(&path, content).await?; 97 | // hash of the transfer file 98 | let data = tokio::fs::read(&path).await?; 99 | let (_, expect_hash) = abao::encode::outboard(&data); 100 | let expect_name = filename.to_string(); 101 | 102 | let (db, hash) = 103 | provider::create_collection(vec![provider::DataSource::File(path)]).await?; 104 | let provider = provider::Provider::builder(db).bind_addr(addr).spawn()?; 105 | 106 | async fn run_client( 107 | hash: Hash, 108 | token: AuthToken, 109 | file_hash: Hash, 110 | name: String, 111 | addr: SocketAddr, 112 | peer_id: PeerId, 113 | content: Vec<u8>, 114 | ) -> Result<()> { 115 | let opts = get::Options { 116 | addr, 117 | peer_id: Some(peer_id), 118 | keylog: true, 119 | }; 120 | let content = &content; 121 | let name = &name; 122 | get::run( 123 | hash, 124 | token, 125 | opts, 126 | || async { Ok(()) }, 127 | |_collection| async { Ok(()) }, 128 | |got_hash, mut reader, got_name| async move { 129 | assert_eq!(file_hash, got_hash); 130 | let mut got = Vec::new(); 131 | reader.read_to_end(&mut got).await?; 132 | assert_eq!(content, &got); 133 | assert_eq!(*name, got_name); 134 | 135 | Ok(reader) 136 | }, 137 | ) 138 | .await?; 139 | 140 | Ok(()) 141 | } 142 | 143 | let mut tasks = Vec::new(); 144 | for _i in 0..3 { 145 | tasks.push(tokio::task::spawn(run_client( 146 | hash, 147 | provider.auth_token(), 148 | expect_hash.into(), 149 | expect_name.clone(), 150 | provider.listen_addr(), 151 | provider.peer_id(), 152 | content.to_vec(), 153 | ))); 154 | } 155 | 156 | futures::future::join_all(tasks).await; 157 | 158 | Ok(()) 159 | } 160 | 161 | // Run the test creating random data for each blob, using the size specified by the file 162 | // options 163 | async fn transfer_random_data<S>(file_opts: Vec<(S, usize)>) -> Result<()> 164 | where 165 | S: Into<String> + std::fmt::Debug + std::cmp::PartialEq, 166 | { 167 | let file_opts = file_opts 168 | .into_iter() 169 | .map(|(name, size)| { 170 | let mut content = vec![0u8; size]; 171 | rand::thread_rng().fill_bytes(&mut content); 172 | (name, content) 173 | }) 174 | .collect(); 175 | transfer_data(file_opts).await 176 | } 177 | 178 | // Run the test for a vec of filenames and blob data 179 | async fn transfer_data<S>(file_opts: Vec<(S, Vec<u8>)>) -> Result<()> 180 | where 181 | S: Into<String> + std::fmt::Debug + std::cmp::PartialEq, 182 | { 183 | let dir: PathBuf = testdir!(); 184 | 185 | // create and save files 186 | let mut files = 
Vec::new(); 187 | let mut expects = Vec::new(); 188 | let num_blobs = file_opts.len(); 189 | 190 | for opt in file_opts.into_iter() { 191 | let (name, data) = opt; 192 | 193 | let name = name.into(); 194 | let path = dir.join(name.clone()); 195 | // get expected hash of file 196 | let (_, hash) = abao::encode::outboard(&data); 197 | let hash = Hash::from(hash); 198 | 199 | tokio::fs::write(&path, data).await?; 200 | files.push(provider::DataSource::File(path.clone())); 201 | 202 | // keep track of expected values 203 | expects.push((name, path, hash)); 204 | } 205 | 206 | let (db, collection_hash) = provider::create_collection(files).await?; 207 | 208 | let addr = "127.0.0.1:0".parse().unwrap(); 209 | let provider = provider::Provider::builder(db).bind_addr(addr).spawn()?; 210 | let mut provider_events = provider.subscribe(); 211 | let events_task = tokio::task::spawn(async move { 212 | let mut events = Vec::new(); 213 | while let Ok(event) = provider_events.recv().await { 214 | match event { 215 | Event::TransferCompleted { .. } | Event::TransferAborted { .. } => { 216 | events.push(event); 217 | break; 218 | } 219 | _ => events.push(event), 220 | } 221 | } 222 | events 223 | }); 224 | 225 | let opts = get::Options { 226 | addr: provider.listen_addr(), 227 | peer_id: Some(provider.peer_id()), 228 | keylog: true, 229 | }; 230 | 231 | let i = AtomicUsize::new(0); 232 | let expects = Arc::new(expects); 233 | 234 | get::run( 235 | collection_hash, 236 | provider.auth_token(), 237 | opts, 238 | || async { Ok(()) }, 239 | |collection| { 240 | assert_eq!(collection.blobs.len(), num_blobs); 241 | async { Ok(()) } 242 | }, 243 | |got_hash, mut reader, got_name| { 244 | let i = &i; 245 | let expects = expects.clone(); 246 | async move { 247 | let iv = i.load(std::sync::atomic::Ordering::SeqCst); 248 | let (expect_name, path, expect_hash) = expects.get(iv).unwrap(); 249 | assert_eq!(*expect_hash, got_hash); 250 | let expect = tokio::fs::read(&path).await?; 251 | let mut got = Vec::new(); 252 | reader.read_to_end(&mut got).await?; 253 | assert_eq!(expect, got); 254 | assert_eq!(*expect_name, got_name); 255 | i.fetch_add(1, std::sync::atomic::Ordering::SeqCst); 256 | Ok(reader) 257 | } 258 | }, 259 | ) 260 | .await?; 261 | 262 | // We have to wait for the completed event before shutting down the provider. 263 | let events = tokio::time::timeout(Duration::from_secs(30), events_task) 264 | .await 265 | .expect("duration expired") 266 | .expect("events task failed"); 267 | provider.shutdown(); 268 | provider.await?; 269 | 270 | assert_events(events); 271 | 272 | Ok(()) 273 | } 274 | 275 | fn assert_events(events: Vec<Event>) { 276 | assert_eq!(events.len(), 3); 277 | assert!(matches!(events[0], Event::ClientConnected { .. })); 278 | assert!(matches!(events[1], Event::RequestReceived { .. })); 279 | assert!(matches!(events[2], Event::TransferCompleted { .. })); 280 | } 281 | 282 | fn setup_logging() { 283 | tracing_subscriber::registry() 284 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr)) 285 | .with(EnvFilter::from_default_env()) 286 | .try_init() 287 | .ok(); 288 | } 289 | 290 | #[tokio::test] 291 | async fn test_server_close() { 292 | // Prepare a Provider transferring a file. 
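// The supervisor task below shuts the provider down as soon as it sees
// `TransferCompleted`; the test asserts that the client's `get::run` call
// still returns cleanly even though the provider closes the connection
// right after the transfer finishes.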
293 | setup_logging(); 294 | let dir = testdir!(); 295 | let src = dir.join("src"); 296 | fs::write(&src, "hello there").await.unwrap(); 297 | let (db, hash) = create_collection(vec![src.into()]).await.unwrap(); 298 | let mut provider = Provider::builder(db) 299 | .bind_addr("127.0.0.1:0".parse().unwrap()) 300 | .spawn() 301 | .unwrap(); 302 | let auth_token = provider.auth_token(); 303 | let provider_addr = provider.listen_addr(); 304 | 305 | // This task closes the connection on the provider side as soon as the transfer 306 | // completes. 307 | let supervisor = tokio::spawn(async move { 308 | let mut events = provider.subscribe(); 309 | loop { 310 | tokio::select! { 311 | biased; 312 | res = &mut provider => break res.context("provider failed"), 313 | maybe_event = events.recv() => { 314 | match maybe_event { 315 | Ok(event) => { 316 | match event { 317 | Event::TransferCompleted { .. } => provider.shutdown(), 318 | Event::TransferAborted { .. } => { 319 | break Err(anyhow!("transfer aborted")); 320 | } 321 | _ => (), 322 | } 323 | } 324 | Err(err) => break Err(anyhow!("event failed: {err:#}")), 325 | } 326 | } 327 | } 328 | } 329 | }); 330 | 331 | get::run( 332 | hash, 333 | auth_token, 334 | get::Options { 335 | addr: provider_addr, 336 | peer_id: None, 337 | keylog: true, 338 | }, 339 | || async move { Ok(()) }, 340 | |_collection| async move { Ok(()) }, 341 | |_hash, mut stream, _name| async move { 342 | io::copy(&mut stream, &mut io::sink()).await?; 343 | Ok(stream) 344 | }, 345 | ) 346 | .await 347 | .unwrap(); 348 | 349 | // Unwrap the JoinHandle, then the result of the Provider 350 | tokio::time::timeout(Duration::from_secs(10), supervisor) 351 | .await 352 | .expect("supervisor timeout") 353 | .expect("supervisor failed") 354 | .expect("supervisor error"); 355 | } 356 | 357 | #[tokio::test] 358 | async fn test_blob_reader_partial() -> Result<()> { 359 | // Prepare a Provider transferring a file. 360 | let dir = testdir!(); 361 | let src0 = dir.join("src0"); 362 | let src1 = dir.join("src1"); 363 | { 364 | let content = vec![1u8; 1000]; 365 | let mut f = tokio::fs::File::create(&src0).await?; 366 | for _ in 0..10 { 367 | f.write_all(&content).await?; 368 | } 369 | } 370 | fs::write(&src1, "hello world").await?; 371 | let (db, hash) = create_collection(vec![src0.into(), src1.into()]).await?; 372 | let provider = Provider::builder(db) 373 | .bind_addr("127.0.0.1:0".parse().unwrap()) 374 | .spawn()?; 375 | let auth_token = provider.auth_token(); 376 | let provider_addr = provider.listen_addr(); 377 | 378 | let timeout = tokio::time::timeout( 379 | std::time::Duration::from_secs(10), 380 | get::run( 381 | hash, 382 | auth_token, 383 | get::Options { 384 | addr: provider_addr, 385 | peer_id: None, 386 | keylog: true, 387 | }, 388 | || async move { Ok(()) }, 389 | |_collection| async move { Ok(()) }, 390 | |_hash, stream, _name| async move { 391 | // evil: do nothing with the stream! 
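// `get::run` guards against this: after `on_blob` returns, it probes the
// returned reader with `read_exact` and bails with "`on_blob` callback did
// not fully read the blob content" if any data is left, so this `get` call
// is expected to fail (see the timeout and `expect_err` assertions below).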
392 | Ok(stream) 393 | }, 394 | ), 395 | ) 396 | .await; 397 | provider.shutdown(); 398 | 399 | let err = timeout.expect( 400 | "`get` function is hanging, make sure we are handling misbehaving `on_blob` functions", 401 | ); 402 | 403 | err.expect_err("expected an error when passing in a misbehaving `on_blob` function"); 404 | Ok(()) 405 | } 406 | } 407 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt, net::SocketAddr, path::PathBuf, str::FromStr}; 2 | 3 | use anyhow::{bail, Context, Result}; 4 | use clap::{Parser, Subcommand}; 5 | use console::style; 6 | use indicatif::{ 7 | HumanBytes, HumanDuration, ProgressBar, ProgressDrawTarget, ProgressState, ProgressStyle, 8 | }; 9 | use sendme::protocol::AuthToken; 10 | use sendme::provider::Ticket; 11 | use tokio::io::AsyncWriteExt; 12 | use tokio::sync::Mutex; 13 | use tracing_subscriber::{prelude::*, EnvFilter}; 14 | 15 | use sendme::{get, provider, Hash, Keypair, PeerId}; 16 | 17 | #[derive(Parser, Debug, Clone)] 18 | #[clap(version, about, long_about = None)] 19 | #[clap(about = "Send data.")] 20 | struct Cli { 21 | #[clap(subcommand)] 22 | command: Commands, 23 | } 24 | 25 | #[derive(Subcommand, Debug, Clone)] 26 | #[allow(clippy::large_enum_variant)] 27 | enum Commands { 28 | /// Serve the data from the given path. If it is a folder, all files in that folder will be served. If none is specified, reads from STDIN. 29 | #[clap(about = "Serve the data from the given path")] 30 | Provide { 31 | path: Option<PathBuf>, 32 | /// Optional address to bind to, defaults to 127.0.0.1:4433. 33 | #[clap(long, short)] 34 | addr: Option<SocketAddr>, 35 | /// Auth token, defaults to randomly generated. 36 | #[clap(long)] 37 | auth_token: Option<String>, 38 | /// If this path is provided and it exists, the private key is read from this file and used; if it does not exist, the private key will be persisted to this location. 39 | #[clap(long)] 40 | key: Option<PathBuf>, 41 | /// Log SSL pre-master key to the file in the SSLKEYLOGFILE environment variable. 42 | #[clap(long)] 43 | keylog: bool, 44 | }, 45 | /// Fetch some data by hash. 46 | #[clap(about = "Fetch the data from the hash")] 47 | Get { 48 | /// The root hash to retrieve. 49 | hash: Blake3Cid, 50 | /// PeerId of the provider. 51 | #[clap(long, short)] 52 | peer: PeerId, 53 | /// The authentication token to present to the server. 54 | #[clap(long)] 55 | token: String, 56 | /// Optional address of the provider, defaults to 127.0.0.1:4433. 57 | #[clap(long, short)] 58 | addr: Option<SocketAddr>, 59 | /// Optional path to a new directory in which to save the file(s). If none is specified, writes the data to STDOUT. 60 | #[clap(long, short)] 61 | out: Option<PathBuf>, 62 | /// Log SSL pre-master key to the file in the SSLKEYLOGFILE environment variable. 63 | #[clap(long)] 64 | keylog: bool, 65 | }, 66 | /// Fetches some data from a ticket. 67 | /// 68 | /// The ticket contains the hash as well as the authentication and connection 69 | /// information needed to connect to the provider. It is a simpler, but slightly 70 | /// less flexible, alternative to the `get` subcommand. 71 | #[clap( 72 | about = "Fetch the data using a ticket for all provider information and authentication." 73 | )] 74 | GetTicket { 75 | /// Optional path to a new directory in which to save the file(s). If none is specified, writes the data to STDOUT. 76 | #[clap(long, short)] 77 | out: Option<PathBuf>, 78 | /// Ticket containing everything to retrieve a hash from the provider.
80 | ticket: Ticket, 81 | /// Log SSL pre-master key to the file in the SSLKEYLOGFILE environment variable. 82 | #[clap(long)] 83 | keylog: bool, 84 | }, 85 | } 86 | 87 | // Note about writing to STDOUT vs STDERR 88 | // Looking at https://unix.stackexchange.com/questions/331611/do-progress-reports-logging-information-belong-on-stderr-or-stdout 89 | // it is a little complicated. 90 | // The current setup is to write all progress information to STDERR and all data to STDOUT. 91 | 92 | struct OutWriter { 93 | stderr: Mutex<tokio::io::Stderr>, 94 | } 95 | 96 | impl OutWriter { 97 | pub fn new() -> Self { 98 | let stderr = tokio::io::stderr(); 99 | Self { 100 | stderr: Mutex::new(stderr), 101 | } 102 | } 103 | } 104 | 105 | impl OutWriter { 106 | pub async fn println(&self, content: impl AsRef<[u8]>) { 107 | let stderr = &mut *self.stderr.lock().await; 108 | stderr.write_all(content.as_ref()).await.unwrap(); 109 | stderr.write_all(b"\n").await.unwrap(); 110 | } 111 | } 112 | 113 | #[repr(transparent)] 114 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 115 | struct Blake3Cid(Hash); 116 | 117 | const CID_PREFIX: [u8; 4] = [ 118 | 0x01, // version 119 | 0x55, // raw codec 120 | 0x1e, // hash function, blake3 121 | 0x20, // hash size, 32 bytes 122 | ]; 123 | 124 | impl Blake3Cid { 125 | pub fn new(hash: Hash) -> Self { 126 | Blake3Cid(hash) 127 | } 128 | 129 | pub fn as_hash(&self) -> &Hash { 130 | &self.0 131 | } 132 | 133 | pub fn as_bytes(&self) -> [u8; 36] { 134 | let hash: [u8; 32] = self.0.as_ref().try_into().unwrap(); 135 | let mut res = [0u8; 36]; 136 | res[0..4].copy_from_slice(&CID_PREFIX); 137 | res[4..36].copy_from_slice(&hash); 138 | res 139 | } 140 | 141 | pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> { 142 | anyhow::ensure!( 143 | bytes.len() == 36, 144 | "invalid cid length, expected 36, got {}", 145 | bytes.len() 146 | ); 147 | anyhow::ensure!(bytes[0..4] == CID_PREFIX, "invalid cid prefix"); 148 | let mut hash = [0u8; 32]; 149 | hash.copy_from_slice(&bytes[4..36]); 150 | Ok(Blake3Cid(Hash::from(hash))) 151 | } 152 | } 153 | 154 | impl fmt::Display for Blake3Cid { 155 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 156 | // result will be 58 bytes plus prefix 157 | let mut res = [b'b'; 59]; 158 | // write the encoded bytes 159 | data_encoding::BASE32_NOPAD.encode_mut(&self.as_bytes(), &mut res[1..]); 160 | // convert to string, this is guaranteed to succeed 161 | let t = std::str::from_utf8_mut(res.as_mut()).unwrap(); 162 | // hack since data_encoding doesn't have BASE32LOWER_NOPAD as a const 163 | t.make_ascii_lowercase(); 164 | // write the str, no allocations 165 | f.write_str(t) 166 | } 167 | } 168 | 169 | impl FromStr for Blake3Cid { 170 | type Err = anyhow::Error; 171 | 172 | fn from_str(s: &str) -> Result<Self> { 173 | let sb = s.as_bytes(); 174 | if sb.len() == 59 && sb[0] == b'b' { 175 | // this is a base32 encoded cid, we can decode it directly 176 | let mut t = [0u8; 58]; 177 | t.copy_from_slice(&sb[1..]); 178 | // hack since data_encoding doesn't have BASE32LOWER_NOPAD as a const 179 | std::str::from_utf8_mut(t.as_mut()) 180 | .unwrap() 181 | .make_ascii_uppercase(); 182 | // decode the bytes 183 | let mut res = [0u8; 36]; 184 | data_encoding::BASE32_NOPAD 185 | .decode_mut(&t, &mut res) 186 | .map_err(|_e| anyhow::anyhow!("invalid base32"))?; 187 | // convert to cid, this will check the prefix 188 | Self::from_bytes(&res) 189 | } else { 190 | // if we want to support all the weird multibase prefixes, we have no choice 191 | // but to use the multibase crate 192 | let (_base, bytes) 
= multibase::decode(s)?; 193 | Self::from_bytes(bytes.as_ref()) 194 | } 195 | } 196 | } 197 | 198 | const PROGRESS_STYLE: &str = 199 | "{msg}\n{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})"; 200 | 201 | #[tokio::main(flavor = "multi_thread")] 202 | async fn main() -> Result<()> { 203 | tracing_subscriber::registry() 204 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr)) 205 | .with(EnvFilter::from_default_env()) 206 | .init(); 207 | 208 | let cli = Cli::parse(); 209 | 210 | match cli.command { 211 | Commands::Get { 212 | hash, 213 | peer, 214 | token, 215 | addr, 216 | out, 217 | keylog, 218 | } => { 219 | let mut opts = get::Options { 220 | peer_id: Some(peer), 221 | keylog, 222 | ..Default::default() 223 | }; 224 | if let Some(addr) = addr { 225 | opts.addr = addr; 226 | } 227 | let token = 228 | AuthToken::from_str(&token).context("Wrong format for authentication token")?; 229 | tokio::select! { 230 | biased; 231 | res = get_interactive(*hash.as_hash(), opts, token, out) => { 232 | res 233 | } 234 | _ = tokio::signal::ctrl_c() => { 235 | println!("Ending transfer early..."); 236 | Ok(()) 237 | } 238 | } 239 | } 240 | Commands::GetTicket { 241 | out, 242 | ticket, 243 | keylog, 244 | } => { 245 | let Ticket { 246 | hash, 247 | peer, 248 | addr, 249 | token, 250 | } = ticket; 251 | let opts = get::Options { 252 | addr, 253 | peer_id: Some(peer), 254 | keylog, 255 | }; 256 | tokio::select! { 257 | biased; 258 | res = get_interactive(hash, opts, token, out) => { 259 | res 260 | } 261 | _ = tokio::signal::ctrl_c() => { 262 | println!("Ending transfer early..."); 263 | Ok(()) 264 | } 265 | } 266 | } 267 | Commands::Provide { 268 | path, 269 | addr, 270 | auth_token, 271 | key, 272 | keylog, 273 | } => { 274 | tokio::select! { 275 | biased; 276 | res = provide_interactive(path, addr, auth_token, key, keylog) => { 277 | res 278 | } 279 | _ = tokio::signal::ctrl_c() => { 280 | println!("\nShutting down provider..."); 281 | Ok(()) 282 | } 283 | } 284 | } 285 | } 286 | } 287 | 288 | async fn provide_interactive( 289 | path: Option<PathBuf>, 290 | addr: Option<SocketAddr>, 291 | auth_token: Option<String>, 292 | key: Option<PathBuf>, 293 | keylog: bool, 294 | ) -> Result<()> { 295 | let out_writer = OutWriter::new(); 296 | let keypair = get_keypair(key).await?; 297 | 298 | let mut tmp_path = None; 299 | 300 | let sources = if let Some(path) = path { 301 | out_writer 302 | .println(format!("Reading {}", path.display())) 303 | .await; 304 | if path.is_dir() { 305 | let mut paths = Vec::new(); 306 | let mut iter = tokio::fs::read_dir(&path).await?; 307 | while let Some(el) = iter.next_entry().await?
{ 308 | if el.path().is_file() { 309 | paths.push(el.path().into()); 310 | } 311 | } 312 | paths 313 | } else if path.is_file() { 314 | vec![path.into()] 315 | } else { 316 | bail!("path must be either a Directory or a File"); 317 | } 318 | } else { 319 | // Store STDIN content into a temporary file 320 | let (file, path) = tempfile::NamedTempFile::new()?.into_parts(); 321 | let mut file = tokio::fs::File::from_std(file); 322 | let path_buf = path.to_path_buf(); 323 | tmp_path = Some(path); 324 | tokio::io::copy(&mut tokio::io::stdin(), &mut file).await?; 325 | vec![path_buf.into()] 326 | }; 327 | 328 | let (db, hash) = provider::create_collection(sources).await?; 329 | 330 | println!("Collection: {}\n", Blake3Cid::new(hash)); 331 | for (_, path, size) in db.blobs() { 332 | println!("- {}: {} bytes", path.display(), size); 333 | } 334 | println!(); 335 | let mut builder = provider::Provider::builder(db) 336 | .keypair(keypair) 337 | .keylog(keylog); 338 | if let Some(addr) = addr { 339 | builder = builder.bind_addr(addr); 340 | } 341 | if let Some(ref encoded) = auth_token { 342 | let auth_token = AuthToken::from_str(encoded)?; 343 | builder = builder.auth_token(auth_token); 344 | } 345 | let provider = builder.spawn()?; 346 | 347 | out_writer 348 | .println(format!("PeerID: {}", provider.peer_id())) 349 | .await; 350 | out_writer 351 | .println(format!("Auth token: {}", provider.auth_token())) 352 | .await; 353 | out_writer 354 | .println(format!("All-in-one ticket: {}", provider.ticket(hash))) 355 | .await; 356 | provider.await?; 357 | 358 | // Drop tempath to signal it can be destroyed 359 | drop(tmp_path); 360 | Ok(()) 361 | } 362 | 363 | async fn get_keypair(key: Option) -> Result { 364 | match key { 365 | Some(key_path) => { 366 | if key_path.exists() { 367 | let keystr = tokio::fs::read(key_path).await?; 368 | let keypair = Keypair::try_from_openssh(keystr)?; 369 | Ok(keypair) 370 | } else { 371 | let keypair = Keypair::generate(); 372 | let ser_key = keypair.to_openssh()?; 373 | tokio::fs::write(key_path, ser_key).await?; 374 | Ok(keypair) 375 | } 376 | } 377 | None => { 378 | // No path provided, just generate one 379 | Ok(Keypair::generate()) 380 | } 381 | } 382 | } 383 | 384 | async fn get_interactive( 385 | hash: Hash, 386 | opts: get::Options, 387 | token: AuthToken, 388 | out: Option, 389 | ) -> Result<()> { 390 | let out_writer = OutWriter::new(); 391 | out_writer 392 | .println(format!("Fetching: {}", Blake3Cid::new(hash))) 393 | .await; 394 | 395 | out_writer 396 | .println(format!("{} Connecting ...", style("[1/3]").bold().dim())) 397 | .await; 398 | 399 | let pb = ProgressBar::hidden(); 400 | pb.enable_steady_tick(std::time::Duration::from_millis(50)); 401 | pb.set_style( 402 | ProgressStyle::with_template(PROGRESS_STYLE) 403 | .unwrap() 404 | .with_key( 405 | "eta", 406 | |state: &ProgressState, w: &mut dyn std::fmt::Write| { 407 | write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap() 408 | }, 409 | ) 410 | .progress_chars("#>-"), 411 | ); 412 | 413 | let on_connected = || { 414 | let out_writer = &out_writer; 415 | async move { 416 | out_writer 417 | .println(format!("{} Requesting ...", style("[2/3]").bold().dim())) 418 | .await; 419 | Ok(()) 420 | } 421 | }; 422 | let on_collection = |collection: &sendme::blobs::Collection| { 423 | let pb = &pb; 424 | let out_writer = &out_writer; 425 | let name = collection.name().to_string(); 426 | let total_entries = collection.total_entries(); 427 | let size = collection.total_blobs_size(); 428 | async move { 429 | out_writer 
430 |             .println(format!(
431 |                 "{} Downloading {name}...",
432 |                 style("[3/3]").bold().dim()
433 |             ))
434 |             .await;
435 |         out_writer
436 |             .println(format!(
437 |                 " {total_entries} file(s) with total transfer size {}",
438 |                 HumanBytes(size)
439 |             ))
440 |             .await;
441 |         pb.set_length(size);
442 |         pb.reset();
443 |         pb.set_draw_target(ProgressDrawTarget::stderr());
444 | 
445 |         Ok(())
446 |     }
447 | };
448 | 
449 | let on_blob = |hash: Hash, mut reader, name: String| {
450 |     let out = &out;
451 |     let pb = &pb;
452 |     async move {
453 |         let name = if name.is_empty() {
454 |             hash.to_string()
455 |         } else {
456 |             name
457 |         };
458 |         pb.set_message(format!("Receiving '{name}'..."));
459 | 
460 |         // Wrap the reader to show progress.
461 |         let mut wrapped_reader = pb.wrap_async_read(&mut reader);
462 | 
463 |         if let Some(ref outpath) = out {
464 |             tokio::fs::create_dir_all(outpath)
465 |                 .await
466 |                 .with_context(|| format!("Unable to create directory {outpath}"))?;
467 |             let dirpath = std::path::PathBuf::from(outpath);
468 |             let filepath = dirpath.join(name);
469 | 
470 |             // Create temp file
471 |             let (temp_file, dup) = tokio::task::spawn_blocking(|| {
472 |                 let temp_file = tempfile::Builder::new()
473 |                     .prefix("sendme-tmp-")
474 |                     .tempfile_in(dirpath)
475 |                     .context("Failed to create temporary output file")?;
476 |                 let dup = temp_file.as_file().try_clone()?;
477 |                 Ok::<_, anyhow::Error>((temp_file, dup))
478 |             })
479 |             .await??;
480 | 
481 |             let file = tokio::fs::File::from_std(dup);
482 |             let mut file_buf = tokio::io::BufWriter::new(file);
483 |             tokio::io::copy(&mut wrapped_reader, &mut file_buf).await?;
484 | 
485 |             // Rename temp file to target name
486 |             let filepath2 = filepath.clone();
487 |             tokio::task::spawn_blocking(|| temp_file.persist(filepath2))
488 |                 .await?
489 |                 .context("Failed to write output file")?;
490 |         } else {
491 |             // Write the blob data to STDOUT
492 |             let mut stdout = tokio::io::stdout();
493 |             tokio::io::copy(&mut wrapped_reader, &mut stdout).await?;
494 |         }
495 | 
496 |         Ok(reader)
497 |     }
498 | };
499 | let stats = get::run(hash, token, opts, on_connected, on_collection, on_blob).await?;
500 | 
501 | pb.finish_and_clear();
502 | out_writer
503 |     .println(format!("Done in {}", HumanDuration(stats.elapsed)))
504 |     .await;
505 | 
506 | Ok(())
507 | }
508 | 
--------------------------------------------------------------------------------
/src/progress.rs:
--------------------------------------------------------------------------------
1 | //! Generic utilities to track progress of data transfers.
2 | //!
3 | //! This is not especially specific to sendme but can be helpful together with it. The
4 | //! [`ProgressEmitter`] has a [`ProgressEmitter::wrap_async_read`] method which can make it
5 | //! easy to track progress of transfers.
6 | //!
7 | //! However, depending on your environment there might also be better choices for this, e.g. very
8 | //! similar and more advanced functionality is available in the `indicatif` crate for
9 | //! terminal applications.
10 | 
11 | use std::pin::Pin;
12 | use std::sync::atomic::Ordering;
13 | use std::sync::Arc;
14 | use std::task::Poll;
15 | 
16 | use portable_atomic::{AtomicU16, AtomicU64};
17 | use tokio::io::{self, AsyncRead};
18 | use tokio::sync::broadcast;
19 | 
20 | /// A generic progress event emitter.
21 | /// 
22 | /// It is created with a total value to reach and at which increments progress should be
23 | /// emitted. E.g. when downloading a file of any size where you want percentage increments
24 | /// you would create `ProgressEmitter::new(file_size_in_bytes, 100)` and
25 | /// [`ProgressEmitter::subscribe`] will yield numbers `1..=100` only.
26 | ///
27 | /// Progress is made by calling [`ProgressEmitter::inc`], which can be implicitly done by
28 | /// [`ProgressEmitter::wrap_async_read`].
29 | #[derive(Debug, Clone)]
30 | pub struct ProgressEmitter {
31 |     inner: Arc<InnerProgressEmitter>,
32 | }
33 | 
34 | impl ProgressEmitter {
35 |     /// Creates a new emitter.
36 |     ///
37 |     /// The emitter expects to see *total* being added via [`ProgressEmitter::inc`] and will
38 |     /// emit *steps* updates.
39 |     pub fn new(total: u64, steps: u16) -> Self {
40 |         let (tx, _rx) = broadcast::channel(16);
41 |         Self {
42 |             inner: Arc::new(InnerProgressEmitter {
43 |                 total: AtomicU64::new(total),
44 |                 count: AtomicU64::new(0),
45 |                 steps,
46 |                 last_step: AtomicU16::new(0u16),
47 |                 tx,
48 |             }),
49 |         }
50 |     }
51 | 
52 |     /// Sets a new total in case you did not know the total up front.
53 |     pub fn set_total(&self, value: u64) {
54 |         self.inner.set_total(value)
55 |     }
56 | 
57 |     /// Returns a receiver that gets incremental values.
58 |     ///
59 |     /// The values yielded depend on *steps* passed to [`ProgressEmitter::new`]: it will go
60 |     /// from `1..=steps`.
61 |     pub fn subscribe(&self) -> broadcast::Receiver<u16> {
62 |         self.inner.subscribe()
63 |     }
64 | 
65 |     /// Increments the progress by *amount*.
66 |     pub fn inc(&self, amount: u64) {
67 |         self.inner.inc(amount);
68 |     }
69 | 
70 |     /// Wraps an [`AsyncRead`] which implicitly calls [`ProgressEmitter::inc`].
71 |     pub fn wrap_async_read<R: AsyncRead>(&self, read: R) -> ProgressAsyncReader<R> {
72 |         ProgressAsyncReader {
73 |             emitter: self.clone(),
74 |             inner: read,
75 |         }
76 |     }
77 | }
78 | 
79 | /// The actual implementation.
80 | ///
81 | /// This exists so it can be Arc'd into [`ProgressEmitter`] and we can easily have multiple
82 | /// `Send + Sync` copies of it. This is used by the
83 | /// [`ProgressAsyncReader`] to update the progress without intertwining
84 | /// lifetimes.
85 | #[derive(Debug)]
86 | struct InnerProgressEmitter {
87 |     total: AtomicU64,
88 |     count: AtomicU64,
89 |     steps: u16,
90 |     last_step: AtomicU16,
91 |     tx: broadcast::Sender<u16>,
92 | }
93 | 
94 | impl InnerProgressEmitter {
95 |     fn inc(&self, amount: u64) {
96 |         let prev_count = self.count.fetch_add(amount, Ordering::Relaxed);
97 |         let count = prev_count + amount;
98 |         let total = self.total.load(Ordering::Relaxed);
99 |         let step = (std::cmp::min(count, total) * u64::from(self.steps) / total) as u16;
100 |         let last_step = self.last_step.swap(step, Ordering::Relaxed);
101 |         if step > last_step {
102 |             self.tx.send(step).ok();
103 |         }
104 |     }
105 | 
106 |     fn set_total(&self, value: u64) {
107 |         self.total.store(value, Ordering::Relaxed);
108 |     }
109 | 
110 |     fn subscribe(&self) -> broadcast::Receiver<u16> {
111 |         self.tx.subscribe()
112 |     }
113 | }
114 | 
115 | /// A wrapper around [`AsyncRead`] which increments a [`ProgressEmitter`].
116 | ///
117 | /// This can be used just like the underlying [`AsyncRead`] but increments progress for each
118 | /// byte read. Create this using [`ProgressEmitter::wrap_async_read`].
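///
/// # Example
///
/// A minimal sketch of the intended use (added for illustration; the module path
/// `sendme::progress` is an assumption):
///
/// ```no_run
/// # async fn demo() -> std::io::Result<()> {
/// use sendme::progress::ProgressEmitter;
///
/// let data = vec![0u8; 1024];
/// // Ask for at most 10 updates over the whole transfer.
/// let progress = ProgressEmitter::new(data.len() as u64, 10);
/// let mut rx = progress.subscribe();
/// let mut reader = progress.wrap_async_read(&data[..]);
/// tokio::io::copy(&mut reader, &mut tokio::io::sink()).await?;
/// while let Ok(step) = rx.try_recv() {
///     eprintln!("progress: {step}/10");
/// }
/// # Ok(())
/// # }
/// ```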
119 | #[derive(Debug)]
120 | pub struct ProgressAsyncReader<R: AsyncRead> {
121 |     emitter: ProgressEmitter,
122 |     inner: R,
123 | }
124 | 
125 | impl<R> AsyncRead for ProgressAsyncReader<R>
126 | where
127 |     R: AsyncRead + Unpin,
128 | {
129 |     fn poll_read(
130 |         mut self: Pin<&mut Self>,
131 |         cx: &mut std::task::Context<'_>,
132 |         buf: &mut io::ReadBuf<'_>,
133 |     ) -> Poll<io::Result<()>> {
134 |         let prev_len = buf.filled().len() as u64;
135 |         match Pin::new(&mut self.inner).poll_read(cx, buf) {
136 |             Poll::Ready(val) => {
137 |                 let new_len = buf.filled().len() as u64;
138 |                 self.emitter.inc(new_len - prev_len);
139 |                 Poll::Ready(val)
140 |             }
141 |             Poll::Pending => Poll::Pending,
142 |         }
143 |     }
144 | }
145 | 
146 | #[cfg(test)]
147 | mod tests {
148 |     use tokio::sync::broadcast::error::TryRecvError;
149 | 
150 |     use super::*;
151 | 
152 |     #[test]
153 |     fn test_inc() {
154 |         let progress = ProgressEmitter::new(160, 16);
155 |         let mut rx = progress.subscribe();
156 | 
157 |         progress.inc(1);
158 |         assert_eq!(progress.inner.count.load(Ordering::Relaxed), 1);
159 |         let res = rx.try_recv();
160 |         assert!(matches!(res, Err(TryRecvError::Empty)));
161 | 
162 |         progress.inc(9);
163 |         assert_eq!(progress.inner.count.load(Ordering::Relaxed), 10);
164 |         let res = rx.try_recv();
165 |         assert!(matches!(res, Ok(1)));
166 | 
167 |         progress.inc(30);
168 |         assert_eq!(progress.inner.count.load(Ordering::Relaxed), 40);
169 |         let res = rx.try_recv();
170 |         assert!(matches!(res, Ok(4)));
171 | 
172 |         progress.inc(120);
173 |         assert_eq!(progress.inner.count.load(Ordering::Relaxed), 160);
174 |         let res = rx.try_recv();
175 |         assert!(matches!(res, Ok(16)));
176 |     }
177 | 
178 |     #[tokio::test]
179 |     async fn test_async_reader() {
180 |         // Note that the broadcast::Receiver has 16 slots, pushing more into them without
181 |         // consuming will result in a (Try)RecvError::Lagged.
182 |         let progress = ProgressEmitter::new(160, 16);
183 |         let mut rx = progress.subscribe();
184 | 
185 |         let data = [1u8; 100];
186 |         let mut wrapped_reader = progress.wrap_async_read(&data[..]);
187 |         io::copy(&mut wrapped_reader, &mut io::sink())
188 |             .await
189 |             .unwrap();
190 | 
191 |         // Most likely this test will invoke a single AsyncRead::poll_read and thus only a
192 |         // single event will be emitted. But we can not really rely on this and can only
193 |         // check the last value.
194 |         let mut current = 0;
195 |         while let Ok(val) = rx.try_recv() {
196 |             current = val;
197 |         }
198 |         assert_eq!(current, 10);
199 |     }
200 | }
201 | 
--------------------------------------------------------------------------------
/src/protocol.rs:
--------------------------------------------------------------------------------
1 | //! Protocol for communication between provider and client.
2 | use std::fmt::Display;
3 | use std::io;
4 | use std::str::FromStr;
5 | 
6 | use abao::decode::AsyncSliceDecoder;
7 | use anyhow::{ensure, Result};
8 | use bytes::{Bytes, BytesMut};
9 | use postcard::experimental::max_size::MaxSize;
10 | use quinn::VarInt;
11 | use serde::{Deserialize, Serialize};
12 | use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
13 | use tracing::debug;
14 | 
15 | use crate::util::{self, Hash};
16 | 
17 | /// Maximum message size is limited to 100MiB for now.
18 | const MAX_MESSAGE_SIZE: usize = 1024 * 1024 * 100;
19 | 
20 | /// Protocol version
21 | pub const VERSION: u64 = 1;
22 | 
23 | #[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone, MaxSize)]
24 | pub(crate) struct Handshake {
25 |     pub version: u64,
26 |     pub token: AuthToken,
27 | }
28 | 
29 | impl Handshake {
30 |     pub fn new(token: AuthToken) -> Self {
31 |         Self {
32 |             version: VERSION,
33 |             token,
34 |         }
35 |     }
36 | }
37 | 
38 | #[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone, MaxSize)]
39 | pub(crate) struct Request {
40 |     pub id: u64,
41 |     /// blake3 hash
42 |     pub name: Hash,
43 | }
44 | 
45 | #[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone)]
46 | pub(crate) struct Response {
47 |     pub id: u64,
48 |     pub data: Res,
49 | }
50 | 
51 | #[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone)]
52 | pub(crate) enum Res {
53 |     NotFound,
54 |     // If found, a stream of bao data is sent as next message.
55 |     Found,
56 |     /// Indicates that the given hash referred to a collection of multiple blobs.
57 |     /// A stream of bao data that decodes to a `Collection` is sent as the next message,
58 |     /// followed by `Res::Found` responses, sent in the order indicated in the `Collection`.
59 |     FoundCollection {
60 |         /// The size of the raw data we are planning to transfer.
61 |         total_blobs_size: u64,
62 |     },
63 | }
64 | 
65 | /// Write the given data to the sink, prefixed with its length as an unsigned little-endian u64.
66 | pub(crate) async fn write_lp<W: AsyncWrite + Unpin>(writer: &mut W, data: &[u8]) -> Result<()> {
67 |     ensure!(
68 |         data.len() < MAX_MESSAGE_SIZE,
69 |         "sending message is too large"
70 |     );
71 | 
72 |     // send length prefix
73 |     let data_len = data.len() as u64;
74 |     writer.write_u64_le(data_len).await?;
75 | 
76 |     // write message
77 |     writer.write_all(data).await?;
78 |     Ok(())
79 | }
80 | 
81 | /// Read and deserialize into the given type from the provided source, based on the length prefix.
82 | pub(crate) async fn read_lp<'a, R: AsyncRead + Unpin, T: Deserialize<'a>>(
83 |     mut reader: R,
84 |     buffer: &'a mut BytesMut,
85 | ) -> Result<Option<(T, usize)>> {
86 |     // read length prefix
87 |     let size = match read_prefix(&mut reader).await {
88 |         Ok(size) => size,
89 |         Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => return Ok(None),
90 |         Err(err) => return Err(err.into()),
91 |     };
92 |     let mut reader = reader.take(size);
93 | 
94 |     let size = usize::try_from(size)?;
95 |     let mut read = 0;
96 |     while read != size {
97 |         let r = reader.read_buf(buffer).await?;
98 |         read += r;
99 |         if r == 0 {
100 |             break;
101 |         }
102 |     }
103 |     let response: T = postcard::from_bytes(&buffer[..size])?;
104 |     debug!("read message of size {}", size);
105 | 
106 |     Ok(Some((response, size)))
107 | }
108 | 
109 | /// Return a buffer for the data, based on a given size, from the given source.
110 | /// The new buffer is split off from the buffer that is passed into the function.
111 | pub(crate) async fn read_size_data<R: AsyncRead + Unpin>(
112 |     size: u64,
113 |     reader: R,
114 |     buffer: &mut BytesMut,
115 | ) -> Result<Bytes> {
116 |     debug!("reading {}", size);
117 |     let mut reader = reader.take(size);
118 |     let size = usize::try_from(size)?;
119 |     let mut read = 0;
120 |     while read != size {
121 |         let r = reader.read_buf(buffer).await?;
122 |         read += r;
123 |         if r == 0 {
124 |             break;
125 |         }
126 |     }
127 |     debug!("finished reading");
128 |     Ok(buffer.split_to(size).freeze())
129 | }
130 | 
131 | /// Read and decode the given bao encoded data from the provided source.
132 | ///
133 | /// After the data is read successfully, the reader will be at the end of the data.
134 | /// If there is an error, the reader can be anywhere, so it is recommended to discard it.
135 | pub(crate) async fn read_bao_encoded<R: AsyncRead + Unpin>(
136 |     reader: R,
137 |     hash: Hash,
138 | ) -> Result<Vec<u8>> {
139 |     let mut decoder = AsyncSliceDecoder::new(reader, &hash.into(), 0, u64::MAX);
140 |     // we don't know the size yet, so we just allocate a reasonable amount
141 |     let mut decoded = Vec::with_capacity(4096);
142 |     decoder.read_to_end(&mut decoded).await?;
143 |     Ok(decoded)
144 | }
145 | 
146 | /// Return a buffer of the data, based on the length prefix, from the given source.
147 | /// The new buffer is split off from the buffer that is passed into the function.
148 | pub(crate) async fn read_lp_data<R: AsyncRead + Unpin>(
149 |     mut reader: R,
150 |     buffer: &mut BytesMut,
151 | ) -> Result<Option<Bytes>> {
152 |     // read length prefix
153 |     let size = read_prefix(&mut reader).await?;
154 | 
155 |     let response = read_size_data(size, reader, buffer).await?;
156 |     Ok(Some(response))
157 | }
158 | 
159 | async fn read_prefix<R: AsyncRead + Unpin>(mut reader: R) -> io::Result<u64> {
160 |     // read length prefix
161 |     let size = reader.read_u64_le().await?;
162 |     Ok(size)
163 | }
164 | 
165 | /// A token used to authenticate a handshake.
166 | ///
167 | /// The token has a printable representation which can be serialised using [`Display`] and
168 | /// deserialised using [`FromStr`].
169 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Deserialize, Serialize, MaxSize)]
170 | pub struct AuthToken {
171 |     bytes: [u8; 32],
172 | }
173 | 
174 | impl AuthToken {
175 |     /// Generates a new random token.
176 |     pub fn generate() -> Self {
177 |         Self {
178 |             bytes: rand::random(),
179 |         }
180 |     }
181 | }
182 | 
183 | /// Serialises the [`AuthToken`] to base64.
184 | impl Display for AuthToken {
185 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
186 |         write!(f, "{}", util::encode(self.bytes))
187 |     }
188 | }
189 | 
190 | /// Error for parsing [`AuthToken`] using [`FromStr`].
191 | #[derive(thiserror::Error, Debug)]
192 | pub enum AuthTokenParseError {
193 |     /// Invalid base64 encoding.
194 |     #[error("invalid encoding: {0}")]
195 |     Base64(#[from] base64::DecodeError),
196 |     /// Invalid length.
197 |     #[error("invalid length: {0}")]
198 |     Length(usize),
199 | }
200 | 
201 | /// Deserialises the [`AuthToken`] from base64.
202 | impl FromStr for AuthToken {
203 |     type Err = AuthTokenParseError;
204 | 
205 |     fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
206 |         let decoded = util::decode(s)?;
207 |         let bytes = decoded
208 |             .try_into()
209 |             .map_err(|v: Vec<u8>| AuthTokenParseError::Length(v.len()))?;
210 |         Ok(AuthToken { bytes })
211 |     }
212 | }
213 | 
214 | /// Reasons to close connections or stop streams.
215 | ///
216 | /// A QUIC **connection** can be *closed* and a **stream** can request the other side to
217 | /// *stop* sending data. Both closing and stopping have an associated `error_code`; closing
218 | /// also adds a `reason` as some arbitrary bytes.
219 | ///
220 | /// This enum exists so we have a single namespace for `error_code`s used.
221 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
222 | #[repr(u16)]
223 | pub(crate) enum Closed {
224 |     /// The [`quinn::RecvStream`] was dropped.
225 |     ///
226 |     /// Used implicitly when a [`quinn::RecvStream`] is dropped without explicit call to
227 |     /// [`quinn::RecvStream::stop`]. We don't use this explicitly but this is here as
228 |     /// documentation as to what happened to `0`.
229 |     StreamDropped = 0,
230 |     /// The provider is terminating.
231 |     ///
232 |     /// When a provider terminates, all connections and associated streams are closed.
233 |     ProviderTerminating = 1,
234 |     /// The provider has received the request.
235 |     ///
236 |     /// Only a single request is allowed on a stream; if more data is received after this, a
237 |     /// provider may send this error code in a STOP_STREAM frame.
238 |     RequestReceived = 2,
239 | }
240 | 
241 | impl Closed {
242 |     pub fn reason(&self) -> &'static [u8] {
243 |         match self {
244 |             Closed::StreamDropped => &b"stream dropped"[..],
245 |             Closed::ProviderTerminating => &b"provider terminating"[..],
246 |             Closed::RequestReceived => &b"request received"[..],
247 |         }
248 |     }
249 | }
250 | 
251 | impl From<Closed> for VarInt {
252 |     fn from(source: Closed) -> Self {
253 |         VarInt::from(source as u16)
254 |     }
255 | }
256 | 
257 | /// Unknown error_code, cannot be converted into [`Closed`].
258 | #[derive(thiserror::Error, Debug)]
259 | #[error("Unknown error_code: {0}")]
260 | pub(crate) struct UnknownErrorCode(u64);
261 | 
262 | impl TryFrom<VarInt> for Closed {
263 |     type Error = UnknownErrorCode;
264 | 
265 |     fn try_from(value: VarInt) -> std::result::Result<Self, Self::Error> {
266 |         match value.into_inner() {
267 |             0 => Ok(Self::StreamDropped),
268 |             1 => Ok(Self::ProviderTerminating),
269 |             2 => Ok(Self::RequestReceived),
270 |             val => Err(UnknownErrorCode(val)),
271 |         }
272 |     }
273 | }
274 | 
275 | #[cfg(test)]
276 | mod tests {
277 |     use super::*;
278 | 
279 |     #[test]
280 |     fn test_auth_token_base64() {
281 |         let token = AuthToken::generate();
282 |         println!("token: {token}");
283 |         let base64 = token.to_string();
284 |         println!("token: {base64}");
285 |         let decoded = AuthToken::from_str(&base64).unwrap();
286 |         assert_eq!(decoded, token);
287 | 
288 |         let err = AuthToken::from_str("not-base64").err().unwrap();
289 |         println!("err {err:#}");
290 |         assert!(matches!(err, AuthTokenParseError::Base64(_)));
291 | 
292 |         let err = AuthToken::from_str("abcd").err().unwrap();
293 |         println!("err {err:#}");
294 |         assert!(matches!(err, AuthTokenParseError::Length(3)));
295 |     }
296 | }
297 | 
--------------------------------------------------------------------------------
/src/provider.rs:
--------------------------------------------------------------------------------
1 | //! Provider API
2 | //!
3 | //! A provider is a server that serves content-addressed data (blobs or collections).
4 | //! To create a provider, create a database using [`create_collection`], then build a
5 | //! provider using [`Builder`] and spawn it using [`Builder::spawn`].
6 | //!
7 | //! You can monitor what is happening in the provider using [`Provider::subscribe`].
8 | //!
9 | //! To shut down the provider, call [`Provider::shutdown`].
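//!
//! # Example
//!
//! A minimal end-to-end sketch (added for illustration; the crate path `sendme`, the
//! file name, and the error handling are assumptions, and the provider runs until
//! interrupted):
//!
//! ```no_run
//! # async fn demo() -> anyhow::Result<()> {
//! use sendme::provider::{create_collection, DataSource, Provider};
//!
//! // Build an in-memory database from a single file and serve it.
//! let sources = vec![DataSource::new("hello.txt".into())];
//! let (db, hash) = create_collection(sources).await?;
//! let provider = Provider::builder(db).spawn()?;
//! println!("all-in-one ticket: {}", provider.ticket(hash));
//! provider.await?;
//! # Ok(())
//! # }
//! ```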
10 | use std::fmt::{self, Display};
11 | use std::future::Future;
12 | use std::io::{BufReader, Read};
13 | use std::net::SocketAddr;
14 | use std::path::PathBuf;
15 | use std::pin::Pin;
16 | use std::str::FromStr;
17 | use std::task::Poll;
18 | use std::{collections::HashMap, sync::Arc};
19 | 
20 | use abao::encode::SliceExtractor;
21 | use anyhow::{bail, ensure, Context, Result};
22 | use bytes::{Bytes, BytesMut};
23 | use futures::future;
24 | use serde::{Deserialize, Serialize};
25 | use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
26 | use tokio::sync::broadcast;
27 | use tokio::task::{JoinError, JoinHandle};
28 | use tokio_util::io::SyncIoBridge;
29 | use tokio_util::sync::CancellationToken;
30 | use tracing::{debug, debug_span, warn};
31 | use tracing_futures::Instrument;
32 | 
33 | use crate::blobs::{Blob, Collection};
34 | use crate::protocol::{
35 |     read_lp, write_lp, AuthToken, Closed, Handshake, Request, Res, Response, VERSION,
36 | };
37 | use crate::tls::{self, Keypair, PeerId};
38 | use crate::util::{self, Hash};
39 | 
40 | const MAX_CONNECTIONS: u32 = 1024;
41 | const MAX_STREAMS: u64 = 10;
42 | 
43 | /// Database containing content-addressed data (blobs or collections).
44 | #[derive(Debug, Clone)]
45 | pub struct Database(Arc<HashMap<Hash, BlobOrCollection>>);
46 | 
47 | impl Database {
48 |     fn get(&self, key: &Hash) -> Option<&BlobOrCollection> {
49 |         self.0.get(key)
50 |     }
51 | 
52 |     /// Iterate over all blobs in the database.
53 |     pub fn blobs(&self) -> impl Iterator<Item = (&Hash, &PathBuf, u64)> + '_ {
54 |         self.0
55 |             .iter()
56 |             .filter_map(|(k, v)| match v {
57 |                 BlobOrCollection::Blob(data) => Some((k, data)),
58 |                 BlobOrCollection::Collection(_) => None,
59 |             })
60 |             .map(|(k, data)| (k, &data.path, data.size))
61 |     }
62 | }
63 | 
64 | /// Builder for the [`Provider`].
65 | ///
66 | /// You must supply a database which can be created using [`create_collection`]; everything
67 | /// else is optional. Finally you can create and run the provider by calling [`Builder::spawn`].
68 | ///
69 | /// The returned [`Provider`] is awaitable to know when it finishes. It can be terminated
70 | /// using [`Provider::shutdown`].
71 | #[derive(Debug)]
72 | pub struct Builder {
73 |     bind_addr: SocketAddr,
74 |     keypair: Keypair,
75 |     auth_token: AuthToken,
76 |     db: Database,
77 |     keylog: bool,
78 | }
79 | 
80 | #[derive(Debug)]
81 | pub(crate) enum BlobOrCollection {
82 |     Blob(Data),
83 |     Collection((Bytes, Bytes)),
84 | }
85 | 
86 | impl Builder {
87 |     /// Creates a new builder for [`Provider`] using the given [`Database`].
88 |     pub fn with_db(db: Database) -> Self {
89 |         Self {
90 |             bind_addr: "127.0.0.1:4433".parse().unwrap(),
91 |             keypair: Keypair::generate(),
92 |             auth_token: AuthToken::generate(),
93 |             db,
94 |             keylog: false,
95 |         }
96 |     }
97 | 
98 |     /// Binds the provider service to a different socket.
99 |     ///
100 |     /// By default it binds to `127.0.0.1:4433`.
101 |     pub fn bind_addr(mut self, addr: SocketAddr) -> Self {
102 |         self.bind_addr = addr;
103 |         self
104 |     }
105 | 
106 |     /// Uses the given [`Keypair`] for the [`PeerId`] instead of a newly generated one.
107 |     pub fn keypair(mut self, keypair: Keypair) -> Self {
108 |         self.keypair = keypair;
109 |         self
110 |     }
111 | 
112 |     /// Uses the given [`AuthToken`] instead of a newly generated one.
113 |     pub fn auth_token(mut self, auth_token: AuthToken) -> Self {
114 |         self.auth_token = auth_token;
115 |         self
116 |     }
117 | 
118 |     /// Whether to log the SSL pre-master key.
119 |     ///
120 |     /// If `true` and the `SSLKEYLOGFILE` environment variable is the path to a file, this
121 |     /// file will be used to log the SSL pre-master key. This is useful to inspect captured
122 |     /// traffic.
123 |     pub fn keylog(mut self, keylog: bool) -> Self {
124 |         self.keylog = keylog;
125 |         self
126 |     }
127 | 
128 |     /// Spawns the [`Provider`] in a tokio task.
129 |     ///
130 |     /// This will create the underlying network server and spawn a tokio task accepting
131 |     /// connections. The returned [`Provider`] can be used to control the task as well as
132 |     /// get information about it.
133 |     pub fn spawn(self) -> Result<Provider> {
134 |         let tls_server_config = tls::make_server_config(&self.keypair, self.keylog)?;
135 |         let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_server_config));
136 |         let mut transport_config = quinn::TransportConfig::default();
137 |         transport_config
138 |             .max_concurrent_bidi_streams(MAX_STREAMS.try_into()?)
139 |             .max_concurrent_uni_streams(0u32.into());
140 | 
141 |         server_config
142 |             .transport_config(Arc::new(transport_config))
143 |             .concurrent_connections(MAX_CONNECTIONS);
144 | 
145 |         let endpoint = quinn::Endpoint::server(server_config, self.bind_addr)?;
146 |         let listen_addr = endpoint.local_addr().unwrap();
147 |         let db2 = self.db.clone();
148 |         let (events_sender, _events_receiver) = broadcast::channel(8);
149 |         let events = events_sender.clone();
150 |         let cancel_token = CancellationToken::new();
151 |         let task = {
152 |             let cancel_token = cancel_token.clone();
153 |             tokio::spawn(async move {
154 |                 Self::run(endpoint, db2, self.auth_token, events_sender, cancel_token).await
155 |             })
156 |         };
157 | 
158 |         Ok(Provider {
159 |             listen_addr,
160 |             keypair: self.keypair,
161 |             auth_token: self.auth_token,
162 |             task,
163 |             events,
164 |             cancel_token,
165 |         })
166 |     }
167 | 
168 |     async fn run(
169 |         server: quinn::Endpoint,
170 |         db: Database,
171 |         auth_token: AuthToken,
172 |         events: broadcast::Sender<Event>,
173 |         cancel_token: CancellationToken,
174 |     ) {
175 |         debug!("\nlistening at: {:#?}", server.local_addr().unwrap());
176 | 
177 |         loop {
178 |             tokio::select! {
179 |                 biased;
180 |                 _ = cancel_token.cancelled() => break,
181 |                 Some(connecting) = server.accept() => {
182 |                     let db = db.clone();
183 |                     let events = events.clone();
184 |                     tokio::spawn(handle_connection(connecting, db, auth_token, events));
185 |                 }
186 |                 else => break,
187 |             }
188 |         }
189 | 
190 |         // Closing the Endpoint is the equivalent of calling Connection::close on all
191 |         // connections: Operations will immediately fail with
192 |         // ConnectionError::LocallyClosed. All streams are interrupted, this is not
193 |         // graceful.
194 |         let error_code = Closed::ProviderTerminating;
195 |         server.close(error_code.into(), error_code.reason());
196 |     }
197 | }
198 | 
199 | /// A server which implements the sendme provider.
200 | ///
201 | /// Clients can connect to this server and request hashes from it.
202 | ///
203 | /// The only way to create this is by using [`Builder::spawn`]. [`Provider::builder`]
204 | /// is a shorthand to create a suitable [`Builder`].
205 | ///
206 | /// This runs a tokio task which can be aborted and joined if desired. To join the task,
207 | /// await the [`Provider`] struct directly; it will complete when the task completes. If
208 | /// this is dropped, the provider task is not stopped but keeps running.
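///
/// # Example
///
/// A short sketch (illustrative only) of watching [`Event`]s while the provider runs;
/// the `provider` value is assumed to come from [`Builder::spawn`]:
///
/// ```no_run
/// # async fn demo(provider: sendme::provider::Provider) {
/// let mut events = provider.subscribe();
/// tokio::spawn(async move {
///     while let Ok(event) = events.recv().await {
///         eprintln!("event: {event:?}");
///     }
/// });
/// provider.await.expect("provider task failed");
/// # }
/// ```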
209 | #[derive(Debug)]
210 | pub struct Provider {
211 |     listen_addr: SocketAddr,
212 |     keypair: Keypair,
213 |     auth_token: AuthToken,
214 |     task: JoinHandle<()>,
215 |     events: broadcast::Sender<Event>,
216 |     cancel_token: CancellationToken,
217 | }
218 | 
219 | /// Events emitted by the [`Provider`] informing about the current status.
220 | #[derive(Debug, Clone)]
221 | pub enum Event {
222 |     /// A new client connected to the provider.
223 |     ClientConnected {
224 |         /// A unique connection id.
225 |         connection_id: u64,
226 |     },
227 |     /// A request was received from a client.
228 |     RequestReceived {
229 |         /// A unique connection id.
230 |         connection_id: u64,
231 |         /// The request id.
232 |         request_id: u64,
233 |         /// The hash for which the client wants to receive data.
234 |         hash: Hash,
235 |     },
236 |     /// A request was completed and the data was sent to the client.
237 |     TransferCompleted {
238 |         /// A unique connection id.
239 |         connection_id: u64,
240 |         /// The request id.
241 |         request_id: u64,
242 |     },
243 |     /// A request was aborted because the client disconnected.
244 |     TransferAborted {
245 |         /// The QUIC connection id.
246 |         connection_id: u64,
247 |         /// The request id. When `None`, the transfer was aborted before or during reading and decoding
248 |         /// the transfer request.
249 |         request_id: Option<u64>,
250 |     },
251 | }
252 | 
253 | impl Provider {
254 |     /// Returns a new builder for the [`Provider`].
255 |     ///
256 |     /// Once done with the builder, call [`Builder::spawn`] to create the provider.
257 |     pub fn builder(db: Database) -> Builder {
258 |         Builder::with_db(db)
259 |     }
260 | 
261 |     /// Returns the address on which the server is listening for connections.
262 |     pub fn listen_addr(&self) -> SocketAddr {
263 |         self.listen_addr
264 |     }
265 | 
266 |     /// Returns the [`PeerId`] of the provider.
267 |     pub fn peer_id(&self) -> PeerId {
268 |         self.keypair.public().into()
269 |     }
270 | 
271 |     /// Returns the [`AuthToken`] needed to connect to the provider.
272 |     pub fn auth_token(&self) -> AuthToken {
273 |         self.auth_token
274 |     }
275 | 
276 |     /// Subscribe to [`Event`]s emitted from the provider, informing about connections and
277 |     /// progress.
278 |     pub fn subscribe(&self) -> broadcast::Receiver<Event> {
279 |         self.events.subscribe()
280 |     }
281 | 
282 |     /// Return a single token containing everything needed to get a hash.
283 |     ///
284 |     /// See [`Ticket`] for more details of how it can be used.
285 |     pub fn ticket(&self, hash: Hash) -> Ticket {
286 |         // TODO: Verify that the hash exists in the db?
287 |         Ticket {
288 |             hash,
289 |             peer: self.peer_id(),
290 |             addr: self.listen_addr,
291 |             token: self.auth_token,
292 |         }
293 |     }
294 | 
295 |     /// Aborts the provider.
296 |     ///
297 |     /// This does not gracefully terminate currently: all connections are closed and
298 |     /// anything in-transit is lost. The task will stop running and awaiting this
299 |     /// [`Provider`] will complete.
300 |     ///
301 |     /// The shutdown behaviour will become more graceful in the future.
302 |     pub fn shutdown(&self) {
303 |         self.cancel_token.cancel();
304 |     }
305 | }
306 | 
307 | /// The future completes when the spawned tokio task finishes.
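///
/// A sketch of stopping and then joining the provider (illustrative; `provider` is
/// assumed to come from [`Builder::spawn`]):
///
/// ```no_run
/// # async fn demo(provider: sendme::provider::Provider) {
/// provider.shutdown();
/// provider.await.expect("provider task panicked");
/// # }
/// ```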
308 | impl Future for Provider { 309 | type Output = Result<(), JoinError>; 310 | 311 | fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { 312 | Pin::new(&mut self.task).poll(cx) 313 | } 314 | } 315 | 316 | async fn handle_connection( 317 | connecting: quinn::Connecting, 318 | db: Database, 319 | auth_token: AuthToken, 320 | events: broadcast::Sender, 321 | ) { 322 | let remote_addr = connecting.remote_address(); 323 | let connection = match connecting.await { 324 | Ok(conn) => conn, 325 | Err(err) => { 326 | warn!(%remote_addr, "Error connecting: {err:#}"); 327 | return; 328 | } 329 | }; 330 | let connection_id = connection.stable_id() as u64; 331 | let span = debug_span!("connection", connection_id, %remote_addr); 332 | async move { 333 | while let Ok(stream) = connection.accept_bi().await { 334 | let span = debug_span!("stream", stream_id = %stream.0.id()); 335 | events.send(Event::ClientConnected { connection_id }).ok(); 336 | let db = db.clone(); 337 | let events = events.clone(); 338 | tokio::spawn( 339 | async move { 340 | if let Err(err) = 341 | handle_stream(db, auth_token, connection_id, stream, events).await 342 | { 343 | warn!("error: {err:#?}",); 344 | } 345 | } 346 | .instrument(span), 347 | ); 348 | } 349 | } 350 | .instrument(span) 351 | .await 352 | } 353 | 354 | /// Read and decode the handshake. 355 | /// 356 | /// Will fail if there is an error while reading, there is a token 357 | /// mismatch, or no valid handshake was received. 358 | /// 359 | /// When successful, the reader is still useable after this function and the buffer will be drained of any handshake 360 | /// data. 361 | async fn read_handshake( 362 | mut reader: R, 363 | buffer: &mut BytesMut, 364 | token: AuthToken, 365 | ) -> Result<()> { 366 | if let Some((handshake, size)) = read_lp::<_, Handshake>(&mut reader, buffer).await? { 367 | ensure!( 368 | handshake.version == VERSION, 369 | "expected version {} but got {}", 370 | VERSION, 371 | handshake.version 372 | ); 373 | ensure!(handshake.token == token, "AuthToken mismatch"); 374 | let _ = buffer.split_to(size); 375 | } else { 376 | bail!("no valid handshake received"); 377 | } 378 | Ok(()) 379 | } 380 | 381 | /// Read the request from the getter. 382 | /// 383 | /// Will fail if there is an error while reading, if the reader 384 | /// contains more data than the Request, or if no valid request is sent. 385 | /// 386 | /// When successful, the buffer is empty after this function call. 387 | async fn read_request(mut reader: quinn::RecvStream, buffer: &mut BytesMut) -> Result { 388 | let request = read_lp::<_, Request>(&mut reader, buffer).await?; 389 | ensure!( 390 | reader.read_chunk(8, false).await?.is_none(), 391 | "Extra data past request" 392 | ); 393 | if let Some((request, _size)) = request { 394 | Ok(request) 395 | } else { 396 | bail!("No request received"); 397 | } 398 | } 399 | 400 | /// Transfers the collection & blob data. 401 | /// 402 | /// First, it transfers the collection data & its associated outboard encoding data. Then it sequentially transfers each individual blob data & its associated outboard 403 | /// encoding data. 404 | /// 405 | /// Will fail if there is an error writing to the getter or reading from 406 | /// the database. 407 | /// 408 | /// If a blob from the collection cannot be found in the database, the transfer will gracefully 409 | /// close the writer, and return with `Ok(SentStatus::NotFound)`. 
410 | /// 411 | /// If the transfer does _not_ end in error, the buffer will be empty and the writer is gracefully closed. 412 | async fn transfer_collection( 413 | // Database from which to fetch blobs. 414 | db: &Database, 415 | // Quinn stream. 416 | mut writer: quinn::SendStream, 417 | // Buffer used when writing to writer. 418 | buffer: &mut BytesMut, 419 | // The id of the transfer request. 420 | request_id: u64, 421 | // The bao outboard encoded data. 422 | outboard: &Bytes, 423 | // The actual blob data. 424 | data: &Bytes, 425 | ) -> Result { 426 | // We only respond to requests for collections, not individual blobs 427 | let mut extractor = SliceExtractor::new_outboard( 428 | std::io::Cursor::new(&data[..]), 429 | std::io::Cursor::new(&outboard[..]), 430 | 0, 431 | data.len() as u64, 432 | ); 433 | let encoded_size: usize = abao::encode::encoded_size(data.len() as u64) 434 | .try_into() 435 | .unwrap(); 436 | let mut encoded = Vec::with_capacity(encoded_size); 437 | extractor.read_to_end(&mut encoded)?; 438 | 439 | let c: Collection = postcard::from_bytes(data)?; 440 | 441 | // TODO: we should check if the blobs referenced in this container 442 | // actually exist in this provider before returning `FoundCollection` 443 | write_response( 444 | &mut writer, 445 | buffer, 446 | request_id, 447 | Res::FoundCollection { 448 | total_blobs_size: c.total_blobs_size, 449 | }, 450 | ) 451 | .await?; 452 | 453 | let mut data = BytesMut::from(&encoded[..]); 454 | writer.write_buf(&mut data).await?; 455 | for (i, blob) in c.blobs.iter().enumerate() { 456 | debug!("writing blob {}/{}", i, c.blobs.len()); 457 | let (status, writer1) = 458 | send_blob(db.clone(), blob.hash, writer, buffer, request_id).await?; 459 | writer = writer1; 460 | if SentStatus::NotFound == status { 461 | write_response(&mut writer, buffer, request_id, Res::NotFound).await?; 462 | writer.finish().await?; 463 | return Ok(status); 464 | } 465 | } 466 | 467 | writer.finish().await?; 468 | Ok(SentStatus::Sent) 469 | } 470 | 471 | fn notify_transfer_aborted( 472 | events: broadcast::Sender, 473 | connection_id: u64, 474 | request_id: Option, 475 | ) { 476 | let _ = events.send(Event::TransferAborted { 477 | connection_id, 478 | request_id, 479 | }); 480 | } 481 | 482 | async fn handle_stream( 483 | db: Database, 484 | token: AuthToken, 485 | connection_id: u64, 486 | (mut writer, mut reader): (quinn::SendStream, quinn::RecvStream), 487 | events: broadcast::Sender, 488 | ) -> Result<()> { 489 | let mut out_buffer = BytesMut::with_capacity(1024); 490 | let mut in_buffer = BytesMut::with_capacity(1024); 491 | 492 | // 1. Read Handshake 493 | debug!("reading handshake"); 494 | if let Err(e) = read_handshake(&mut reader, &mut in_buffer, token).await { 495 | notify_transfer_aborted(events, connection_id, None); 496 | return Err(e); 497 | } 498 | 499 | // 2. Decode the request. 500 | debug!("reading request"); 501 | let request = match read_request(reader, &mut in_buffer).await { 502 | Ok(r) => r, 503 | Err(e) => { 504 | notify_transfer_aborted(events, connection_id, None); 505 | return Err(e); 506 | } 507 | }; 508 | 509 | let hash = request.name; 510 | debug!("got request({})", request.id); 511 | let _ = events.send(Event::RequestReceived { 512 | connection_id, 513 | request_id: request.id, 514 | hash, 515 | }); 516 | 517 | // 4. 
Attempt to find hash 518 | let (outboard, data) = match db.get(&hash) { 519 | // We only respond to requests for collections, not individual blobs 520 | Some(BlobOrCollection::Collection(d)) => d, 521 | _ => { 522 | debug!("not found {}", hash); 523 | notify_transfer_aborted(events, connection_id, Some(request.id)); 524 | write_response(&mut writer, &mut out_buffer, request.id, Res::NotFound).await?; 525 | writer.finish().await?; 526 | 527 | return Ok(()); 528 | } 529 | }; 530 | 531 | // 5. Transfer data! 532 | match transfer_collection(&db, writer, &mut out_buffer, request.id, outboard, data).await { 533 | Ok(SentStatus::Sent) => { 534 | let _ = events.send(Event::TransferCompleted { 535 | connection_id, 536 | request_id: request.id, 537 | }); 538 | } 539 | Ok(SentStatus::NotFound) => { 540 | notify_transfer_aborted(events, connection_id, Some(request.id)); 541 | } 542 | Err(e) => { 543 | notify_transfer_aborted(events, connection_id, Some(request.id)); 544 | return Err(e); 545 | } 546 | } 547 | 548 | debug!("finished response"); 549 | Ok(()) 550 | } 551 | 552 | #[derive(Clone, Debug, PartialEq, Eq)] 553 | enum SentStatus { 554 | Sent, 555 | NotFound, 556 | } 557 | 558 | async fn send_blob( 559 | db: Database, 560 | name: Hash, 561 | mut writer: W, 562 | buffer: &mut BytesMut, 563 | id: u64, 564 | ) -> Result<(SentStatus, W)> { 565 | match db.get(&name) { 566 | Some(BlobOrCollection::Blob(Data { 567 | outboard, 568 | path, 569 | size, 570 | })) => { 571 | write_response(&mut writer, buffer, id, Res::Found).await?; 572 | let path = path.clone(); 573 | let outboard = outboard.clone(); 574 | let size = *size; 575 | // need to thread the writer though the spawn_blocking, since 576 | // taking a reference does not work. spawn_blocking requires 577 | // 'static lifetime. 578 | writer = tokio::task::spawn_blocking(move || { 579 | let file_reader = std::fs::File::open(&path)?; 580 | let outboard_reader = std::io::Cursor::new(outboard); 581 | let mut wrapper = SyncIoBridge::new(&mut writer); 582 | let mut slice_extractor = abao::encode::SliceExtractor::new_outboard( 583 | file_reader, 584 | outboard_reader, 585 | 0, 586 | size, 587 | ); 588 | let _copied = std::io::copy(&mut slice_extractor, &mut wrapper)?; 589 | std::io::Result::Ok(writer) 590 | }) 591 | .await??; 592 | Ok((SentStatus::Sent, writer)) 593 | } 594 | _ => { 595 | write_response(&mut writer, buffer, id, Res::NotFound).await?; 596 | Ok((SentStatus::NotFound, writer)) 597 | } 598 | } 599 | } 600 | 601 | #[derive(Clone, Debug, PartialEq, Eq)] 602 | pub(crate) struct Data { 603 | /// Outboard data from bao. 604 | outboard: Bytes, 605 | /// Path to the original data, which must not change while in use. 606 | path: PathBuf, 607 | /// Size of the original data. 608 | size: u64, 609 | } 610 | 611 | /// A data source 612 | #[derive(Debug)] 613 | pub enum DataSource { 614 | /// A blob of data originating from the filesystem. The name of the blob is derived from 615 | /// the filename. 616 | File(PathBuf), 617 | /// NamedFile is treated the same as [`DataSource::File`], except you can pass in a custom 618 | /// name. Passing in the empty string will explicitly _not_ persist the filename. 619 | NamedFile { 620 | /// Path to the file 621 | path: PathBuf, 622 | /// Custom name 623 | name: String, 624 | }, 625 | } 626 | 627 | impl DataSource { 628 | /// Creates a new [`DataSource`] from a [`PathBuf`]. 
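    ///
    /// # Example
    ///
    /// A small illustration (not in the original docs; the file name is hypothetical):
    ///
    /// ```no_run
    /// use std::path::PathBuf;
    /// # use sendme::provider::DataSource;
    /// // The blob name will be derived from the file name, i.e. "photo.jpg".
    /// let source = DataSource::new(PathBuf::from("photo.jpg"));
    /// ```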
629 |     pub fn new(path: PathBuf) -> Self {
630 |         DataSource::File(path)
631 |     }
632 |     /// Creates a new [`DataSource`] from a [`PathBuf`] and a custom name.
633 |     pub fn with_name(path: PathBuf, name: String) -> Self {
634 |         DataSource::NamedFile { path, name }
635 |     }
636 | }
637 | 
638 | impl From<PathBuf> for DataSource {
639 |     fn from(value: PathBuf) -> Self {
640 |         DataSource::new(value)
641 |     }
642 | }
643 | 
644 | impl From<&std::path::Path> for DataSource {
645 |     fn from(value: &std::path::Path) -> Self {
646 |         DataSource::new(value.to_path_buf())
647 |     }
648 | }
649 | 
650 | /// Synchronously compute the outboard of a file, and return hash and outboard.
651 | ///
652 | /// It is assumed that the file is not modified while this is running.
653 | ///
654 | /// If it is modified while or after this is running, the outboard will be
655 | /// invalid, so any attempt to compute a slice from it will fail.
656 | ///
657 | /// If the size of the file is changed while this is running, an error will be
658 | /// returned.
659 | ///
660 | /// The path and name are returned with the result to provide context.
661 | fn compute_outboard(
662 |     path: PathBuf,
663 |     name: Option<String>,
664 | ) -> anyhow::Result<(PathBuf, Option<String>, Hash, Vec<u8>)> {
665 |     ensure!(
666 |         path.is_file(),
667 |         "can only transfer blob data: {}",
668 |         path.display()
669 |     );
670 |     let file = std::fs::File::open(&path)?;
671 |     let len = file.metadata()?.len();
672 |     // compute outboard size so we can pre-allocate the buffer.
673 |     //
674 |     // outboard is ~1/16 of data size, so this will fail for really large files
675 |     // on really small devices, e.g. transferring a 1 TB file from a Raspberry Pi 4 with 1 GB RAM.
676 |     //
677 |     // The way to solve this would be to have larger blocks than the blake3 chunk size of 1024.
678 |     // I think we really want to keep the outboard in memory for simplicity.
679 |     let outboard_size = usize::try_from(abao::encode::outboard_size(len))
680 |         .context("outboard too large to fit in memory")?;
681 |     let mut outboard = Vec::with_capacity(outboard_size);
682 | 
683 |     // copy the file into the encoder. Data will be skipped by the encoder in outboard mode.
684 |     let outboard_cursor = std::io::Cursor::new(&mut outboard);
685 |     let mut encoder = abao::encode::Encoder::new_outboard(outboard_cursor);
686 | 
687 |     let mut reader = BufReader::new(file);
688 |     // the length we have actually written, should be the same as the length of the file.
689 |     let len2 = std::io::copy(&mut reader, &mut encoder)?;
690 |     // this can fail if the file was appended to during encoding.
691 |     ensure!(len == len2, "file changed during encoding");
692 |     // this flips the outboard encoding from post-order to pre-order
693 |     let hash = encoder.finalize()?;
694 | 
695 |     Ok((path, name, hash.into(), outboard))
696 | }
697 | 
698 | /// Creates an in-memory database of blobs (with their bao outboard encodings) and collections.
699 | /// Returns the hash of the collection created from the given list of [`DataSource`]s.
700 | pub async fn create_collection(data_sources: Vec<DataSource>) -> Result<(Database, Hash)> {
701 |     // +1 is for the collection itself
702 |     let mut db = HashMap::with_capacity(data_sources.len() + 1);
703 |     let mut blobs = Vec::with_capacity(data_sources.len());
704 |     let mut total_blobs_size: u64 = 0;
705 |     let mut blobs_encoded_size_estimate = 0;
706 | 
707 |     // compute outboards in parallel, using tokio's blocking thread pool
708 |     let outboards = data_sources.into_iter().map(|data| {
709 |         let (path, name) = match data {
710 |             DataSource::File(path) => (path, None),
711 |             DataSource::NamedFile { path, name } => (path, Some(name)),
712 |         };
713 |         tokio::task::spawn_blocking(move || compute_outboard(path, name))
714 |     });
715 |     // wait for completion and collect results
716 |     let outboards = future::join_all(outboards)
717 |         .await
718 |         .into_iter()
719 |         .collect::<Result<Result<Vec<_>, _>, _>>()??;
720 |     // insert outboards into the database and build collection
721 | 
722 |     for (path, name, hash, outboard) in outboards {
723 |         debug_assert!(outboard.len() >= 8, "outboard must at least contain size");
724 |         let size = u64::from_le_bytes(outboard[..8].try_into().unwrap());
725 |         db.insert(
726 |             hash,
727 |             BlobOrCollection::Blob(Data {
728 |                 outboard: Bytes::from(outboard),
729 |                 path: path.clone(),
730 |                 size,
731 |             }),
732 |         );
733 |         total_blobs_size += size;
734 |         // if the given name is `None`, use the filename from the given path as the name
735 |         let name = name.unwrap_or_else(|| {
736 |             path.file_name()
737 |                 .and_then(|s| s.to_str())
738 |                 .unwrap_or_default()
739 |                 .to_string()
740 |         });
741 |         blobs_encoded_size_estimate += name.len() + 32;
742 |         blobs.push(Blob { name, hash });
743 |     }
744 | 
745 |     let c = Collection {
746 |         name: "collection".to_string(),
747 |         blobs,
748 |         total_blobs_size,
749 |     };
750 |     blobs_encoded_size_estimate += c.name.len();
751 | 
752 |     // NOTE: we can't use the postcard::MaxSize to estimate the encoding buffer size
753 |     // because the Collection and Blobs have `String` fields.
754 |     // So instead, we are tracking the filename + hash sizes of each blob, plus an extra 1024
755 |     // to account for any postcard encoding data.
756 |     let mut buffer = BytesMut::zeroed(blobs_encoded_size_estimate + 1024);
757 |     let data = postcard::to_slice(&c, &mut buffer)?;
758 |     let (outboard, hash) = abao::encode::outboard(&data);
759 |     let hash = Hash::from(hash);
760 |     db.insert(
761 |         hash,
762 |         BlobOrCollection::Collection((Bytes::from(outboard), Bytes::from(data.to_vec()))),
763 |     );
764 | 
765 |     Ok((Database(Arc::new(db)), hash))
766 | }
767 | 
768 | async fn write_response<W: AsyncWrite + Unpin>(
769 |     mut writer: W,
770 |     buffer: &mut BytesMut,
771 |     id: u64,
772 |     res: Res,
773 | ) -> Result<()> {
774 |     let response = Response { id, data: res };
775 | 
776 |     // TODO: do not transfer blob data as part of the responses
777 |     if buffer.len() < 1024 {
778 |         buffer.resize(1024, 0u8);
779 |     }
780 |     let used = postcard::to_slice(&response, buffer)?;
781 | 
782 |     write_lp(&mut writer, used).await?;
783 | 
784 |     debug!("written response of length {}", used.len());
785 |     Ok(())
786 | }
787 | 
788 | /// A token containing everything to get a file from the provider.
789 | ///
790 | /// It is a single item which can be easily serialized and deserialized. The [`Display`]
791 | /// and [`FromStr`] implementations serialize to base64.
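///
/// # Example
///
/// A round-trip sketch (illustrative), relying only on the `Display`/`FromStr` pair
/// described above:
///
/// ```no_run
/// # fn demo(ticket: sendme::provider::Ticket) -> anyhow::Result<()> {
/// let encoded = ticket.to_string(); // base64 via `Display`
/// let decoded: sendme::provider::Ticket = encoded.parse()?; // via `FromStr`
/// assert_eq!(decoded, ticket);
/// # Ok(())
/// # }
/// ```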
792 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 793 | pub struct Ticket { 794 | /// The hash to retrieve. 795 | pub hash: Hash, 796 | /// The peer ID identifying the provider. 797 | pub peer: PeerId, 798 | /// The socket address the provider is listening on. 799 | pub addr: SocketAddr, 800 | /// The authentication token with permission to retrieve the hash. 801 | pub token: AuthToken, 802 | } 803 | 804 | impl Ticket { 805 | /// Deserializes from bytes. 806 | pub fn from_bytes(bytes: &[u8]) -> Result { 807 | let slf = postcard::from_bytes(bytes)?; 808 | Ok(slf) 809 | } 810 | 811 | /// Serializes to bytes. 812 | pub fn to_bytes(&self) -> Vec { 813 | postcard::to_stdvec(self).expect("postcard::to_stdvec is infallible") 814 | } 815 | } 816 | 817 | /// Serializes to base64. 818 | impl Display for Ticket { 819 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 820 | let encoded = self.to_bytes(); 821 | write!(f, "{}", util::encode(encoded)) 822 | } 823 | } 824 | 825 | /// Deserializes from base64. 826 | impl FromStr for Ticket { 827 | type Err = anyhow::Error; 828 | 829 | fn from_str(s: &str) -> Result { 830 | let bytes = util::decode(s)?; 831 | let slf = Self::from_bytes(&bytes)?; 832 | Ok(slf) 833 | } 834 | } 835 | 836 | #[cfg(test)] 837 | mod tests { 838 | use std::str::FromStr; 839 | use testdir::testdir; 840 | 841 | use super::*; 842 | 843 | #[test] 844 | fn test_ticket_base64_roundtrip() { 845 | let (_encoded, hash) = abao::encode::encode(b"hi there"); 846 | let hash = Hash::from(hash); 847 | let peer = PeerId::from(Keypair::generate().public()); 848 | let addr = SocketAddr::from_str("127.0.0.1:1234").unwrap(); 849 | let token = AuthToken::generate(); 850 | let ticket = Ticket { 851 | hash, 852 | peer, 853 | addr, 854 | token, 855 | }; 856 | let base64 = ticket.to_string(); 857 | println!("Ticket: {base64}"); 858 | println!("{} bytes", base64.len()); 859 | 860 | let ticket2: Ticket = base64.parse().unwrap(); 861 | assert_eq!(ticket2, ticket); 862 | } 863 | 864 | #[tokio::test] 865 | async fn test_create_collection() -> Result<()> { 866 | let dir: PathBuf = testdir!(); 867 | let mut expect_blobs = vec![]; 868 | let (_, hash) = abao::encode::outboard(vec![]); 869 | let hash = Hash::from(hash); 870 | 871 | // DataSource::File 872 | let foo = dir.join("foo"); 873 | tokio::fs::write(&foo, vec![]).await?; 874 | let foo = DataSource::new(foo); 875 | expect_blobs.push(Blob { 876 | name: "foo".to_string(), 877 | hash, 878 | }); 879 | 880 | // DataSource::NamedFile 881 | let bar = dir.join("bar"); 882 | tokio::fs::write(&bar, vec![]).await?; 883 | let bar = DataSource::with_name(bar, "bat".to_string()); 884 | expect_blobs.push(Blob { 885 | name: "bat".to_string(), 886 | hash, 887 | }); 888 | 889 | // DataSource::NamedFile, empty string name 890 | let baz = dir.join("baz"); 891 | tokio::fs::write(&baz, vec![]).await?; 892 | let baz = DataSource::with_name(baz, "".to_string()); 893 | expect_blobs.push(Blob { 894 | name: "".to_string(), 895 | hash, 896 | }); 897 | 898 | let expect_collection = Collection { 899 | name: "collection".to_string(), 900 | blobs: expect_blobs, 901 | total_blobs_size: 0, 902 | }; 903 | 904 | let (db, hash) = create_collection(vec![foo, bar, baz]).await?; 905 | 906 | let collection = { 907 | let c = db.get(&hash).unwrap(); 908 | if let BlobOrCollection::Collection((_, data)) = c { 909 | Collection::from_bytes(data)? 
910 | } else { 911 | panic!("expected hash to correspond with a `Collection`, found `Blob` instead"); 912 | } 913 | }; 914 | 915 | assert_eq!(expect_collection, collection); 916 | 917 | Ok(()) 918 | } 919 | } 920 | -------------------------------------------------------------------------------- /src/tls.rs: -------------------------------------------------------------------------------- 1 | //! TLS configuration based on libp2p TLS specs. 2 | //! 3 | //! See . 4 | //! Based on rust-libp2p/transports/tls 5 | 6 | pub mod certificate; 7 | mod verifier; 8 | 9 | use std::{ 10 | fmt::{Debug, Display}, 11 | ops::Deref, 12 | str::FromStr, 13 | sync::Arc, 14 | }; 15 | 16 | pub use ed25519_dalek::{PublicKey, SecretKey, Signature}; 17 | use serde::{Deserialize, Serialize}; 18 | use ssh_key::LineEnding; 19 | 20 | use crate::util; 21 | 22 | const P2P_ALPN: [u8; 9] = *b"n0/iroh/1"; 23 | 24 | /// A keypair. 25 | #[derive(Debug)] 26 | pub struct Keypair(ed25519_dalek::Keypair); 27 | 28 | impl Deref for Keypair { 29 | type Target = ed25519_dalek::Keypair; 30 | 31 | fn deref(&self) -> &Self::Target { 32 | &self.0 33 | } 34 | } 35 | 36 | impl Keypair { 37 | /// The public key of this keypair. 38 | pub fn public(&self) -> PublicKey { 39 | self.0.public 40 | } 41 | 42 | /// The secret key of this keypair. 43 | pub fn secret(&self) -> &SecretKey { 44 | &self.0.secret 45 | } 46 | 47 | /// Generate a new keypair. 48 | pub fn generate() -> Self { 49 | let mut rng = rand::rngs::OsRng; 50 | let key = ed25519_dalek::Keypair::generate(&mut rng); 51 | Self(key) 52 | } 53 | 54 | /// Serialise the keypair to OpenSSH format. 55 | pub fn to_openssh(&self) -> ssh_key::Result> { 56 | let ckey = ssh_key::private::Ed25519Keypair::from(&self.0); 57 | ssh_key::private::PrivateKey::from(ckey).to_openssh(LineEnding::default()) 58 | } 59 | 60 | /// Deserialise the keypair from OpenSSH format. 61 | pub fn try_from_openssh>(data: T) -> anyhow::Result { 62 | let ser_key = ssh_key::private::PrivateKey::from_openssh(data)?; 63 | match ser_key.key_data() { 64 | ssh_key::private::KeypairData::Ed25519(kp) => { 65 | let dalek_keypair: ed25519_dalek::Keypair = kp.try_into()?; 66 | Ok(Keypair::from(dalek_keypair)) 67 | } 68 | _ => anyhow::bail!("invalid key format"), 69 | } 70 | } 71 | 72 | fn sign(&self, msg: &[u8]) -> Signature { 73 | use ed25519_dalek::Signer; 74 | 75 | self.0.sign(msg) 76 | } 77 | } 78 | 79 | impl From for Keypair { 80 | fn from(value: ed25519_dalek::Keypair) -> Self { 81 | Keypair(value) 82 | } 83 | } 84 | 85 | // TODO: probably needs a version field 86 | /// An identifier for networked peers. 87 | /// 88 | /// Each network node has a cryptographic identifier which can be used to make sure you are 89 | /// connecting to the right peer. 90 | /// 91 | /// # `Display` and `FromStr` 92 | /// 93 | /// The [`PeerId`] implements both `Display` and `FromStr` which can be used to 94 | /// (de)serialise to human-readable and relatively safely transferrable strings. 95 | #[derive(Clone, PartialEq, Eq, Copy, Serialize, Deserialize)] 96 | pub struct PeerId(PublicKey); 97 | 98 | impl From for PeerId { 99 | fn from(key: PublicKey) -> Self { 100 | PeerId(key) 101 | } 102 | } 103 | 104 | impl Debug for PeerId { 105 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 106 | write!(f, "PeerId({})", util::encode(self.0.as_bytes())) 107 | } 108 | } 109 | 110 | /// Serialises the [`PeerId`] to base64. 111 | /// 112 | /// [`FromStr`] is capable of deserialising this format. 
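///
/// # Example
///
/// A short sketch (illustrative; assumes the `tls` module is reachable as shown) of the
/// round trip this `Display`/`FromStr` pair provides:
///
/// ```no_run
/// # use std::str::FromStr;
/// # fn demo(peer_id: sendme::tls::PeerId) {
/// let encoded = peer_id.to_string();
/// let decoded = sendme::tls::PeerId::from_str(&encoded).expect("valid encoding");
/// assert_eq!(decoded, peer_id);
/// # }
/// ```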
113 | impl Display for PeerId {
114 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
115 |         write!(f, "{}", util::encode(self.0.as_bytes()))
116 |     }
117 | }
118 | 
119 | /// Error when deserialising a [`PeerId`].
120 | #[derive(thiserror::Error, Debug)]
121 | pub enum PeerIdError {
122 |     /// Error when decoding the base64.
123 |     #[error("encoding: {0}")]
124 |     Base64(#[from] base64::DecodeError),
125 |     /// Error when decoding the public key.
126 |     #[error("key: {0}")]
127 |     Key(#[from] ed25519_dalek::SignatureError),
128 | }
129 | 
130 | /// Deserialises the [`PeerId`] from its base64 encoding.
131 | ///
132 | /// [`Display`] is capable of serialising this format.
133 | impl FromStr for PeerId {
134 |     type Err = PeerIdError;
135 | 
136 |     fn from_str(s: &str) -> Result<Self, Self::Err> {
137 |         let bytes = util::decode(s)?;
138 |         let key = PublicKey::from_bytes(&bytes)?;
139 |         Ok(PeerId(key))
140 |     }
141 | }
142 | 
143 | /// Create a TLS client configuration.
144 | ///
145 | /// If *keylog* is `true` this will enable logging of the pre-master key to the file in the
146 | /// `SSLKEYLOGFILE` environment variable. This can be used to inspect the traffic for
147 | /// debugging purposes.
148 | pub fn make_client_config(
149 |     keypair: &Keypair,
150 |     remote_peer_id: Option<PeerId>,
151 |     keylog: bool,
152 | ) -> Result<rustls::ClientConfig, certificate::GenError> {
153 |     let (certificate, private_key) = certificate::generate(keypair)?;
154 | 
155 |     let mut crypto = rustls::ClientConfig::builder()
156 |         .with_cipher_suites(verifier::CIPHERSUITES)
157 |         .with_safe_default_kx_groups()
158 |         .with_protocol_versions(verifier::PROTOCOL_VERSIONS)
159 |         .expect("Cipher suites and kx groups are configured; qed")
160 |         .with_custom_certificate_verifier(Arc::new(
161 |             verifier::Libp2pCertificateVerifier::with_remote_peer_id(remote_peer_id),
162 |         ))
163 |         .with_single_cert(vec![certificate], private_key)
164 |         .expect("Client cert key DER is valid; qed");
165 |     crypto.alpn_protocols = vec![P2P_ALPN.to_vec()];
166 |     if keylog {
167 |         crypto.key_log = Arc::new(rustls::KeyLogFile::new());
168 |     }
169 | 
170 |     Ok(crypto)
171 | }
172 | 
173 | /// Create a TLS server configuration.
174 | ///
175 | /// If *keylog* is `true` this will enable logging of the pre-master key to the file in the
176 | /// `SSLKEYLOGFILE` environment variable. This can be used to inspect the traffic for
177 | /// debugging purposes.
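///
/// # Example
///
/// A minimal sketch (illustrative) of plugging the result into a QUIC endpoint, mirroring
/// how the provider uses it:
///
/// ```no_run
/// # fn demo() -> anyhow::Result<()> {
/// use std::sync::Arc;
/// use sendme::tls::{make_server_config, Keypair};
///
/// let keypair = Keypair::generate();
/// let tls_config = make_server_config(&keypair, false)?;
/// let server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_config));
/// # let _ = server_config;
/// # Ok(())
/// # }
/// ```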
pub fn make_server_config(
    keypair: &Keypair,
    keylog: bool,
) -> Result<rustls::ServerConfig, certificate::GenError> {
    let (certificate, private_key) = certificate::generate(keypair)?;

    let mut crypto = rustls::ServerConfig::builder()
        .with_cipher_suites(verifier::CIPHERSUITES)
        .with_safe_default_kx_groups()
        .with_protocol_versions(verifier::PROTOCOL_VERSIONS)
        .expect("Cipher suites and kx groups are configured; qed")
        .with_client_cert_verifier(Arc::new(verifier::Libp2pCertificateVerifier::new()))
        .with_single_cert(vec![certificate], private_key)
        .expect("Server cert key DER is valid; qed");
    crypto.alpn_protocols = vec![P2P_ALPN.to_vec()];
    if keylog {
        crypto.key_log = Arc::new(rustls::KeyLogFile::new());
    }
    Ok(crypto)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_keypair_openssh_roundtrip() {
        let kp = Keypair::generate();
        let ser = kp.to_openssh().unwrap();
        let de = Keypair::try_from_openssh(&ser).unwrap();
        assert_eq!(kp.to_bytes(), de.to_bytes());
    }
}
--------------------------------------------------------------------------------
/src/tls/certificate.rs:
--------------------------------------------------------------------------------
//! X.509 certificate handling.
//!
//! This module handles generation, signing, and verification of certificates.

use der::{asn1::OctetStringRef, Decode, Encode, Sequence};
use x509_parser::prelude::*;

use super::{Keypair, PeerId, PublicKey, Signature};

/// The libp2p Public Key Extension is an X.509 extension
/// with the Object Identifier 1.3.6.1.4.1.53594.1.1,
/// allocated by IANA to the libp2p project at Protocol Labs.
const P2P_EXT_OID: [u64; 9] = [1, 3, 6, 1, 4, 1, 53594, 1, 1];

/// The peer signs the concatenation of the string `libp2p-tls-handshake:`
/// and the public key that it used to generate the certificate carrying
/// the libp2p Public Key Extension, using its private host key.
/// This signature provides cryptographic proof that the peer was
/// in possession of the private host key at the time the certificate was signed.
const P2P_SIGNING_PREFIX: [u8; 21] = *b"libp2p-tls-handshake:";

// Certificates MUST use the NamedCurve encoding for elliptic curve parameters.
// Similarly, hash functions with an output length less than 256 bits MUST NOT be used.
static P2P_SIGNATURE_ALGORITHM: &rcgen::SignatureAlgorithm = &rcgen::PKCS_ECDSA_P256_SHA256;

/// The public host key and the signature are ASN.1-encoded
/// into the SignedKey data structure, which is carried in the libp2p Public Key Extension.
#[derive(Clone, Debug, Eq, PartialEq, Sequence)]
struct SignedKey<'a> {
    public_key: OctetStringRef<'a>,
    signature: OctetStringRef<'a>,
}

/// Generates a self-signed TLS certificate that includes a libp2p-specific
/// certificate extension containing the public key of the given keypair.
pub fn generate(
    identity_keypair: &Keypair,
) -> Result<(rustls::Certificate, rustls::PrivateKey), GenError> {
    // Keypair used to sign the certificate.
    // SHOULD NOT be related to the host's key.
    // Endpoints MAY generate a new key and certificate
    // for every connection attempt, or they MAY reuse the same key
    // and certificate for multiple connections.
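    // Here a fresh P-256 certificate keypair is created on every call; the libp2p
    // extension built below is what ties it back to the long-lived host identity key.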
    let certificate_keypair = rcgen::KeyPair::generate(P2P_SIGNATURE_ALGORITHM)?;
    let rustls_key = rustls::PrivateKey(certificate_keypair.serialize_der());

    let certificate = {
        let mut params = rcgen::CertificateParams::new(vec![]);
        params.distinguished_name = rcgen::DistinguishedName::new();
        params.custom_extensions.push(make_libp2p_extension(
            identity_keypair,
            &certificate_keypair,
        )?);
        params.alg = P2P_SIGNATURE_ALGORITHM;
        params.key_pair = Some(certificate_keypair);
        rcgen::Certificate::from_params(params)?
    };

    let rustls_certificate = rustls::Certificate(certificate.serialize_der()?);

    Ok((rustls_certificate, rustls_key))
}

/// Attempts to parse the provided bytes as a [`P2pCertificate`].
///
/// For this to succeed, the certificate must contain the specified extension and the signature must
/// match the embedded public key.
pub fn parse(certificate: &rustls::Certificate) -> Result<P2pCertificate, ParseError> {
    let certificate = parse_unverified(certificate.as_ref())?;

    certificate.verify()?;

    Ok(certificate)
}

/// An X.509 certificate with a libp2p-specific extension
/// is used to secure libp2p connections.
#[derive(Debug)]
pub struct P2pCertificate<'a> {
    certificate: X509Certificate<'a>,
    /// This is a specific libp2p Public Key Extension with two values:
    /// * the public host key
    /// * a signature performed using the private host key
    extension: P2pExtension,
}

/// The contents of the specific libp2p extension, containing the public host key
/// and a signature performed using the private host key.
#[derive(Debug)]
pub struct P2pExtension {
    public_key: super::PublicKey,
    /// This signature provides cryptographic proof that the peer was
    /// in possession of the private host key at the time the certificate was signed.
    signature: super::Signature,
}

#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct GenError(#[from] rcgen::RcgenError);

#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ParseError(#[from] pub(crate) webpki::Error);

#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct VerificationError(#[from] pub(crate) webpki::Error);

/// Internal function that only parses but does not verify the certificate.
///
/// Useful for testing but unsuitable for production.
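/// (It skips the `verify()` step, so neither the self-signature nor the libp2p
/// extension signature is checked here.)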
fn parse_unverified(der_input: &[u8]) -> Result<P2pCertificate, webpki::Error> {
    let x509 = X509Certificate::from_der(der_input)
        .map(|(_rest_input, x509)| x509)
        .map_err(|_| webpki::Error::BadDer)?;

    let p2p_ext_oid = der_parser::oid::Oid::from(&P2P_EXT_OID)
        .expect("This is a valid OID of p2p extension; qed");

    let mut libp2p_extension = None;

    for ext in x509.extensions() {
        let oid = &ext.oid;
        if oid == &p2p_ext_oid && libp2p_extension.is_some() {
            // The extension was already parsed
            return Err(webpki::Error::BadDer);
        }

        if oid == &p2p_ext_oid {
            let signed_key =
                SignedKey::from_der(ext.value).map_err(|_| webpki::Error::ExtensionValueInvalid)?;
            let public_key = PublicKey::from_bytes(signed_key.public_key.as_bytes())
                .map_err(|_| webpki::Error::UnknownIssuer)?;
            let signature = Signature::from_bytes(signed_key.signature.as_bytes())
                .map_err(|_| webpki::Error::UnknownIssuer)?;
            let ext = P2pExtension {
                public_key,
                signature,
            };
            libp2p_extension = Some(ext);
            continue;
        }

        if ext.critical {
            // Endpoints MUST abort the connection attempt if the certificate
            // contains critical extensions that the endpoint does not understand.
            return Err(webpki::Error::UnsupportedCriticalExtension);
        }

        // Implementations MUST ignore non-critical extensions with unknown OIDs.
    }

    // The certificate MUST contain the libp2p Public Key Extension.
    // If this extension is missing, endpoints MUST abort the connection attempt.
    let extension = libp2p_extension.ok_or(webpki::Error::BadDer)?;

    let certificate = P2pCertificate {
        certificate: x509,
        extension,
    };

    Ok(certificate)
}

fn make_libp2p_extension(
    identity_keypair: &Keypair,
    certificate_keypair: &rcgen::KeyPair,
) -> Result<rcgen::CustomExtension, GenError> {
    // The peer signs the concatenation of the string `libp2p-tls-handshake:`
    // and the public key that it used to generate the certificate carrying
    // the libp2p Public Key Extension, using its private host key.
    let signature = {
        let mut msg = vec![];
        msg.extend(P2P_SIGNING_PREFIX);
        msg.extend(certificate_keypair.public_key_der());

        identity_keypair.sign(&msg)
    };

    let public_key = identity_keypair.public();
    let signature = signature.to_bytes();
    let key = SignedKey {
        public_key: OctetStringRef::new(&public_key.as_bytes()[..]).unwrap(),
        signature: OctetStringRef::new(&signature).unwrap(),
    };

    let extension_content = key.to_vec().expect("vec");

    // This extension MAY be marked critical.
    let mut ext = rcgen::CustomExtension::from_oid_content(&P2P_EXT_OID, extension_content);
    ext.set_criticality(true);

    Ok(ext)
}

impl P2pCertificate<'_> {
    /// The [`PeerId`] of the remote peer.
    pub fn peer_id(&self) -> PeerId {
        self.extension.public_key.into()
    }

    /// Verify the `signature` of the `message` signed by the private key corresponding
    /// to the public key stored in the certificate.
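    ///
    /// A usage sketch (`msg` and `sig` are placeholders for handshake data):
    ///
    /// ```ignore
    /// let cert = parse(&rustls_certificate)?;
    /// cert.verify_signature(rustls::SignatureScheme::ED25519, msg, sig)?;
    /// ```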
    pub fn verify_signature(
        &self,
        signature_scheme: rustls::SignatureScheme,
        message: &[u8],
        signature: &[u8],
    ) -> Result<(), VerificationError> {
        let pk = self.public_key(signature_scheme)?;
        pk.verify(message, signature)
            .map_err(|_| webpki::Error::InvalidSignatureForPublicKey)?;

        Ok(())
    }

    /// Get a [`ring::signature::UnparsedPublicKey`] for this `signature_scheme`.
    /// Return `Error` if the `signature_scheme` does not match the public key signature
    /// and hashing algorithm or if the `signature_scheme` is not supported.
    fn public_key(
        &self,
        signature_scheme: rustls::SignatureScheme,
    ) -> Result<ring::signature::UnparsedPublicKey<&[u8]>, webpki::Error> {
        use ring::signature;
        use rustls::SignatureScheme::*;

        let current_signature_scheme = self.signature_scheme()?;
        if signature_scheme != current_signature_scheme {
            // This certificate was signed with a different signature scheme
            return Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKey);
        }

        let verification_algorithm: &dyn signature::VerificationAlgorithm = match signature_scheme {
            ECDSA_NISTP256_SHA256 => &signature::ECDSA_P256_SHA256_ASN1,
            ECDSA_NISTP384_SHA384 => &signature::ECDSA_P384_SHA384_ASN1,
            ECDSA_NISTP521_SHA512 => {
                // See https://github.com/briansmith/ring/issues/824
                return Err(webpki::Error::UnsupportedSignatureAlgorithm);
            }
            ED25519 => &signature::ED25519,
            ED448 => {
                // See https://github.com/briansmith/ring/issues/463
                return Err(webpki::Error::UnsupportedSignatureAlgorithm);
            }
            // No support for RSA
            RSA_PKCS1_SHA256 | RSA_PKCS1_SHA384 | RSA_PKCS1_SHA512 | RSA_PSS_SHA256
            | RSA_PSS_SHA384 | RSA_PSS_SHA512 => {
                return Err(webpki::Error::UnsupportedSignatureAlgorithm)
            }
            // Similarly, hash functions with an output length less than 256 bits
            // MUST NOT be used, due to the possibility of collision attacks.
            // In particular, MD5 and SHA1 MUST NOT be used.
            RSA_PKCS1_SHA1 => return Err(webpki::Error::UnsupportedSignatureAlgorithm),
            ECDSA_SHA1_Legacy => return Err(webpki::Error::UnsupportedSignatureAlgorithm),
            Unknown(_) => return Err(webpki::Error::UnsupportedSignatureAlgorithm),
        };
        let spki = &self.certificate.tbs_certificate.subject_pki;
        let key = signature::UnparsedPublicKey::new(
            verification_algorithm,
            spki.subject_public_key.as_ref(),
        );

        Ok(key)
    }

    /// This method validates the certificate according to libp2p TLS 1.3 specs.
    /// The certificate MUST:
    /// 1. be valid at the time it is received by the peer;
    /// 2. use the NamedCurve encoding;
    /// 3. use hash functions with an output length not less than 256 bits;
    /// 4. be self signed;
    /// 5. contain a valid signature in the specific libp2p extension.
    fn verify(&self) -> Result<(), webpki::Error> {
        use ed25519_dalek::Verifier;
        use webpki::Error;

        // The certificate MUST have NotBefore and NotAfter fields set
        // such that the certificate is valid at the time it is received by the peer.
        if !self.certificate.validity().is_valid() {
            return Err(Error::InvalidCertValidity);
        }

        // Certificates MUST use the NamedCurve encoding for elliptic curve parameters.
        // Similarly, hash functions with an output length less than 256 bits
        // MUST NOT be used, due to the possibility of collision attacks.
        // In particular, MD5 and SHA1 MUST NOT be used.
        // Endpoints MUST abort the connection attempt if it is not used.
        let signature_scheme = self.signature_scheme()?;
        // Endpoints MUST abort the connection attempt if the certificate’s
        // self-signature is not valid.
        let raw_certificate = self.certificate.tbs_certificate.as_ref();
        let signature = self.certificate.signature_value.as_ref();
        // check if self signed
        self.verify_signature(signature_scheme, raw_certificate, signature)
            .map_err(|_| Error::SignatureAlgorithmMismatch)?;

        let subject_pki = self.certificate.public_key().raw;

        // The peer signs the concatenation of the string `libp2p-tls-handshake:`
        // and the public key that it used to generate the certificate carrying
        // the libp2p Public Key Extension, using its private host key.
        let mut msg = vec![];
        msg.extend(P2P_SIGNING_PREFIX);
        msg.extend(subject_pki);

        // This signature provides cryptographic proof that the peer was in possession
        // of the private host key at the time the certificate was signed.
        // Peers MUST verify the signature, and abort the connection attempt
        // if signature verification fails.
        let user_owns_sk = self
            .extension
            .public_key
            .verify(&msg, &self.extension.signature)
            .is_ok();
        if !user_owns_sk {
            return Err(Error::UnknownIssuer);
        }

        Ok(())
    }

    /// Return the signature scheme corresponding to [`AlgorithmIdentifier`]s
    /// of `subject_pki` and `signature_algorithm`
    /// according to <https://www.rfc-editor.org/rfc/rfc8446#section-4.2.3>.
    fn signature_scheme(&self) -> Result<rustls::SignatureScheme, webpki::Error> {
        // Certificates MUST use the NamedCurve encoding for elliptic curve parameters.
        // Endpoints MUST abort the connection attempt if it is not used.
        use oid_registry::*;
        use rustls::SignatureScheme::*;

        let signature_algorithm = &self.certificate.signature_algorithm;
        let pki_algorithm = &self.certificate.tbs_certificate.subject_pki.algorithm;

        if pki_algorithm.algorithm == OID_KEY_TYPE_EC_PUBLIC_KEY {
            let signature_param = pki_algorithm
                .parameters
                .as_ref()
                .ok_or(webpki::Error::BadDer)?
                .as_oid()
                .map_err(|_| webpki::Error::BadDer)?;
            if signature_param == OID_EC_P256
                && signature_algorithm.algorithm == OID_SIG_ECDSA_WITH_SHA256
            {
                return Ok(ECDSA_NISTP256_SHA256);
            }
            if signature_param == OID_NIST_EC_P384
                && signature_algorithm.algorithm == OID_SIG_ECDSA_WITH_SHA384
            {
                return Ok(ECDSA_NISTP384_SHA384);
            }
            if signature_param == OID_NIST_EC_P521
                && signature_algorithm.algorithm == OID_SIG_ECDSA_WITH_SHA512
            {
                return Ok(ECDSA_NISTP521_SHA512);
            }
            return Err(webpki::Error::UnsupportedSignatureAlgorithm);
        }

        if signature_algorithm.algorithm == OID_SIG_ED25519 {
            return Ok(ED25519);
        }
        if signature_algorithm.algorithm == OID_SIG_ED448 {
            return Ok(ED448);
        }

        Err(webpki::Error::UnsupportedSignatureAlgorithm)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn sanity_check() {
        let keypair = Keypair::generate();

        let (cert, _) = generate(&keypair).unwrap();
        let parsed_cert = parse(&cert).unwrap();

        assert!(parsed_cert.verify().is_ok());
        assert_eq!(keypair.public(), parsed_cert.extension.public_key);
    }
}
--------------------------------------------------------------------------------
/src/tls/verifier.rs:
--------------------------------------------------------------------------------
//! TLS 1.3 certificates and handshakes handling for libp2p
//!
//! This module handles verification of a client/server certificate chain
//! and of signatures allegedly made by the given certificates.

use super::{certificate, PeerId};
use rustls::{
    cipher_suite::{
        TLS13_AES_128_GCM_SHA256, TLS13_AES_256_GCM_SHA384, TLS13_CHACHA20_POLY1305_SHA256,
    },
    client::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier},
    internal::msgs::handshake::DigitallySignedStruct,
    server::{ClientCertVerified, ClientCertVerifier},
    Certificate, DistinguishedNames, SignatureScheme, SupportedCipherSuite,
    SupportedProtocolVersion,
};

/// The protocol versions supported by this verifier.
///
/// The spec says:
///
/// > The libp2p handshake uses TLS 1.3 (and higher).
/// > Endpoints MUST NOT negotiate lower TLS versions.
pub static PROTOCOL_VERSIONS: &[&SupportedProtocolVersion] = &[&rustls::version::TLS13];

/// A list of the TLS 1.3 cipher suites supported by rustls.
// By default rustls creates client/server configs with both
// TLS 1.3 __and__ 1.2 cipher suites. But we don't need 1.2.
pub static CIPHERSUITES: &[SupportedCipherSuite] = &[
    // TLS1.3 suites
    TLS13_CHACHA20_POLY1305_SHA256,
    TLS13_AES_256_GCM_SHA384,
    TLS13_AES_128_GCM_SHA256,
];

/// Implementation of the `rustls` certificate verification traits for libp2p.
///
/// Only TLS 1.3 is supported. TLS 1.2 should be disabled in the configuration of `rustls`.
pub struct Libp2pCertificateVerifier {
    /// The peer ID we intend to connect to
    remote_peer_id: Option<PeerId>,
}

/// libp2p requires the following of X.509 server certificate chains:
///
/// - Exactly one certificate must be presented.
/// - The certificate must be self-signed.
/// - The certificate must have a valid libp2p extension that includes a
///   signature of its public key.
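///
/// A client-side construction sketch (`expected_peer_id` is a placeholder):
///
/// ```ignore
/// let verifier = Libp2pCertificateVerifier::with_remote_peer_id(Some(expected_peer_id));
/// ```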
impl Libp2pCertificateVerifier {
    pub fn new() -> Self {
        Self {
            remote_peer_id: None,
        }
    }
    pub fn with_remote_peer_id(remote_peer_id: Option<PeerId>) -> Self {
        Self { remote_peer_id }
    }

    /// Return the list of SignatureSchemes that this verifier will handle,
    /// in `verify_tls12_signature` and `verify_tls13_signature` calls.
    ///
    /// This should be in priority order, with the most preferred first.
    fn verification_schemes() -> Vec<SignatureScheme> {
        vec![
            // TODO SignatureScheme::ECDSA_NISTP521_SHA512 is not supported by `ring` yet
            SignatureScheme::ECDSA_NISTP384_SHA384,
            SignatureScheme::ECDSA_NISTP256_SHA256,
            // TODO SignatureScheme::ED448 is not supported by `ring` yet
            SignatureScheme::ED25519,
            // In particular, RSA SHOULD NOT be used.
        ]
    }
}

impl ServerCertVerifier for Libp2pCertificateVerifier {
    fn verify_server_cert(
        &self,
        end_entity: &Certificate,
        intermediates: &[Certificate],
        _server_name: &rustls::ServerName,
        _scts: &mut dyn Iterator<Item = &[u8]>,
        _ocsp_response: &[u8],
        _now: std::time::SystemTime,
    ) -> Result<ServerCertVerified, rustls::Error> {
        let peer_id = verify_presented_certs(end_entity, intermediates)?;

        if let Some(ref remote_peer_id) = self.remote_peer_id {
            // The public host key allows the peer to calculate the peer ID of the peer
            // it is connecting to. Clients MUST verify that the peer ID derived from
            // the certificate matches the peer ID they intended to connect to,
            // and MUST abort the connection if there is a mismatch.
            if remote_peer_id != &peer_id {
                return Err(rustls::Error::PeerMisbehavedError(
                    "Wrong peer ID in p2p extension".to_string(),
                ));
            }
        }

        Ok(ServerCertVerified::assertion())
    }

    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &Certificate,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, rustls::Error> {
        unreachable!("`PROTOCOL_VERSIONS` only allows TLS 1.3")
    }

    fn verify_tls13_signature(
        &self,
        message: &[u8],
        cert: &Certificate,
        dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, rustls::Error> {
        verify_tls13_signature(cert, dss.scheme, message, dss.signature())
    }

    fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
        Self::verification_schemes()
    }
}

/// libp2p requires the following of X.509 client certificate chains:
///
/// - Exactly one certificate must be presented. In particular, client
///   authentication is mandatory in libp2p.
/// - The certificate must be self-signed.
/// - The certificate must have a valid libp2p extension that includes a
///   signature of its public key.
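///
/// These requirements mirror the server-side rules above; the only difference is
/// that client authentication is mandatory.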
impl ClientCertVerifier for Libp2pCertificateVerifier {
    fn offer_client_auth(&self) -> bool {
        true
    }

    fn client_auth_root_subjects(&self) -> Option<DistinguishedNames> {
        Some(vec![])
    }

    fn verify_client_cert(
        &self,
        end_entity: &Certificate,
        intermediates: &[Certificate],
        _now: std::time::SystemTime,
    ) -> Result<ClientCertVerified, rustls::Error> {
        verify_presented_certs(end_entity, intermediates)?;

        Ok(ClientCertVerified::assertion())
    }

    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &Certificate,
        _dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, rustls::Error> {
        unreachable!("`PROTOCOL_VERSIONS` only allows TLS 1.3")
    }

    fn verify_tls13_signature(
        &self,
        message: &[u8],
        cert: &Certificate,
        dss: &DigitallySignedStruct,
    ) -> Result<HandshakeSignatureValid, rustls::Error> {
        verify_tls13_signature(cert, dss.scheme, message, dss.signature())
    }

    fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
        Self::verification_schemes()
    }
}

/// When receiving the certificate chain, an endpoint
/// MUST check these conditions and abort the connection attempt if
/// (a) the presented certificate is not yet valid, OR
/// (b) if it is expired.
/// Endpoints MUST abort the connection attempt if more than one certificate is received,
/// or if the certificate’s self-signature is not valid.
fn verify_presented_certs(
    end_entity: &Certificate,
    intermediates: &[Certificate],
) -> Result<PeerId, rustls::Error> {
    if !intermediates.is_empty() {
        return Err(rustls::Error::General(
            "libp2p-tls requires exactly one certificate".into(),
        ));
    }

    let cert = certificate::parse(end_entity)?;

    Ok(cert.peer_id())
}

fn verify_tls13_signature(
    cert: &Certificate,
    signature_scheme: SignatureScheme,
    message: &[u8],
    signature: &[u8],
) -> Result<HandshakeSignatureValid, rustls::Error> {
    certificate::parse(cert)?.verify_signature(signature_scheme, message, signature)?;

    Ok(HandshakeSignatureValid::assertion())
}

impl From<certificate::ParseError> for rustls::Error {
    fn from(certificate::ParseError(e): certificate::ParseError) -> Self {
        use webpki::Error::*;
        match e {
            BadDer => rustls::Error::InvalidCertificateEncoding,
            e => rustls::Error::InvalidCertificateData(format!("invalid peer certificate: {e}")),
        }
    }
}
impl From<certificate::VerificationError> for rustls::Error {
    fn from(certificate::VerificationError(e): certificate::VerificationError) -> Self {
        use webpki::Error::*;
        match e {
            InvalidSignatureForPublicKey => rustls::Error::InvalidCertificateSignature,
            UnsupportedSignatureAlgorithm | UnsupportedSignatureAlgorithmForPublicKey => {
                rustls::Error::InvalidCertificateSignatureType
            }
            e => rustls::Error::InvalidCertificateData(format!("invalid peer certificate: {e}")),
        }
    }
}
--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
use std::{
    fmt::{self, Display},
    str::FromStr,
};

use anyhow::ensure;
use base64::{engine::general_purpose, Engine as _};
use postcard::experimental::max_size::MaxSize;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

/// Encode the given buffer into Base64 URL SAFE without padding.
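///
/// A quick sanity sketch of the format:
///
/// ```ignore
/// // URL-safe alphabet, no '=' padding.
/// assert_eq!(encode(b"hello"), "aGVsbG8");
/// ```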
pub fn encode(buf: impl AsRef<[u8]>) -> String {
    general_purpose::URL_SAFE_NO_PAD.encode(buf.as_ref())
}

/// Decode the given buffer from Base64 URL SAFE without padding.
pub fn decode(buf: impl AsRef<str>) -> Result<Vec<u8>, base64::DecodeError> {
    general_purpose::URL_SAFE_NO_PAD.decode(buf.as_ref())
}

/// Hash type used throughout.
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
pub struct Hash(blake3::Hash);

impl Hash {
    /// Calculate the hash of the provided bytes.
    pub fn new(buf: impl AsRef<[u8]>) -> Self {
        let val = blake3::hash(buf.as_ref());
        Hash(val)
    }
}

impl AsRef<[u8]> for Hash {
    fn as_ref(&self) -> &[u8] {
        self.0.as_bytes()
    }
}

impl From<Hash> for blake3::Hash {
    fn from(value: Hash) -> Self {
        value.0
    }
}

impl From<blake3::Hash> for Hash {
    fn from(value: blake3::Hash) -> Self {
        Hash(value)
    }
}

impl From<[u8; 32]> for Hash {
    fn from(value: [u8; 32]) -> Self {
        Hash(blake3::Hash::from(value))
    }
}

impl Display for Hash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", encode(self.0.as_bytes()))
    }
}

impl FromStr for Hash {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut arr = [0u8; 32];
        let val = decode(s)?;
        ensure!(
            val.len() == 32,
            "invalid byte length, expected 32, got {}",
            val.len()
        );
        arr.copy_from_slice(&val);
        let hash = blake3::Hash::from(arr);

        Ok(Hash(hash))
    }
}

impl Serialize for Hash {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_bytes(self.0.as_bytes())
    }
}

impl<'de> Deserialize<'de> for Hash {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_bytes(HashVisitor)
    }
}

struct HashVisitor;

impl<'de> de::Visitor<'de> for HashVisitor {
    type Value = Hash;

    fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "an array of 32 bytes containing hash data")
    }

    fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        let bytes: [u8; 32] = v.try_into().map_err(E::custom)?;
        Ok(Hash::from(bytes))
    }
}

impl MaxSize for Hash {
    const POSTCARD_MAX_SIZE: usize = 32;
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_hash() {
        let data = b"hello world";
        let hash = Hash::new(data);

        let encoded = hash.to_string();
        assert_eq!(encoded.parse::<Hash>().unwrap(), hash);
    }
}
--------------------------------------------------------------------------------