├── .github └── workflows │ └── build.yml ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── LICENSE.md ├── README.md ├── build.rs ├── rustfmt.toml ├── src ├── client.rs ├── crypto │ ├── chacha20.rs │ ├── chacha20poly1305.rs │ ├── mod.rs │ └── poly1305.rs ├── error.rs ├── headers │ ├── lnurl_auth_jwt.rs │ └── mod.rs ├── lib.rs ├── types.rs └── util │ ├── key_obfuscator.rs │ ├── mod.rs │ ├── retry.rs │ └── storable_builder.rs └── tests ├── lnurl_auth_jwt_tests.rs ├── retry_tests.rs └── tests.rs /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration Checks 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | build: 7 | strategy: 8 | matrix: 9 | toolchain: [ stable, beta, 1.63.0 ] # 1.63.0 is current MSRV for vss-client 10 | include: 11 | - toolchain: stable 12 | check-fmt: true 13 | - toolchain: 1.63.0 14 | msrv: true 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout source code 18 | uses: actions/checkout@v3 19 | - name: Install Protobuf compiler (protoc) 20 | run: sudo apt-get update && sudo apt-get -y install protobuf-compiler 21 | - name: Install Rust ${{ matrix.toolchain }} toolchain 22 | run: | 23 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} 24 | rustup override set ${{ matrix.toolchain }} 25 | - name: Pin packages to allow for MSRV 26 | if: matrix.msrv 27 | run: | 28 | cargo update -p proptest --precise "1.2.0" --verbose # proptest 1.3.0 requires rustc 1.64.0 29 | cargo update -p regex --precise "1.9.6" --verbose # regex 1.10.0 requires rustc 1.65.0 30 | cargo update -p tokio --precise "1.38.1" --verbose # tokio v1.39.0 requires rustc 1.70 or newer 31 | cargo update -p tokio-util --precise "0.7.11" --verbose # tokio-util v0.7.12 requires rustc 1.70 or newer 32 | - name: Build on Rust ${{ matrix.toolchain }} 33 | run: cargo build --verbose --color always 
34 | - name: Check formatting 35 | if: matrix.check-fmt 36 | run: rustup component add rustfmt && cargo fmt --all -- --check 37 | - name: Test on Rust ${{ matrix.toolchain }} 38 | run: cargo test 39 | - name: Cargo check release on Rust ${{ matrix.toolchain }} 40 | run: cargo check --release 41 | - name: Cargo check doc on Rust ${{ matrix.toolchain }} 42 | run: cargo doc --release 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /src/proto/ 3 | /Cargo.lock 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vss-client" 3 | version = "0.3.1" 4 | rust-version = "1.63.0" 5 | license = "MIT OR Apache-2.0" 6 | edition = "2021" 7 | homepage = "https://lightningdevkit.org/" 8 | repository = "https://github.com/lightningdevkit/vss-rust-client" 9 | description = "Client-side library to interact with Versioned Storage Service (VSS)." 
10 | keywords = ["vss", "bitcoin", "lightning", "ldk", "bdk"] 11 | categories = ["web-programming::http-client", "cryptography::cryptocurrencies"] 12 | 13 | build = "build.rs" 14 | 15 | [features] 16 | default = ["lnurl-auth"] 17 | lnurl-auth = ["dep:bitcoin", "dep:url", "dep:serde", "dep:serde_json", "reqwest/json"] 18 | 19 | [dependencies] 20 | prost = "0.11.6" 21 | reqwest = { version = "0.11.13", default-features = false, features = ["rustls-tls"] } 22 | tokio = { version = "1", default-features = false, features = ["time"] } 23 | rand = "0.8.5" 24 | async-trait = "0.1.77" 25 | bitcoin = { version = "0.32.2", default-features = false, features = ["std", "rand-std"], optional = true } 26 | url = { version = "2.5.0", default-features = false, optional = true } 27 | base64 = { version = "0.21.7", default-features = false} 28 | serde = { version = "1.0.196", default-features = false, features = ["serde_derive"], optional = true } 29 | serde_json = { version = "1.0.113", default-features = false, optional = true } 30 | 31 | bitcoin_hashes = "0.14.0" 32 | 33 | [target.'cfg(genproto)'.build-dependencies] 34 | prost-build = { version = "0.11.3" } 35 | reqwest = { version = "0.11.13", default-features = false, features = ["rustls-tls", "blocking"] } 36 | 37 | [dev-dependencies] 38 | mockito = "0.28.0" 39 | proptest = "1.1.0" 40 | tokio = { version = "1.22.0", features = ["macros"]} 41 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so, 8 | subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR 16 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | This software is licensed under Apache 2.0 or MIT, at your option. 2 | 3 | Some files retain their own copyright notice, however, for full authorship information, see version control history. 4 | 5 | Except as otherwise noted in individual files, all files in this repository are licensed under the Apache License, Version 2.0 or the MIT license , at your option. 6 | 7 | You may not use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of this software or any files in this repository except in accordance with one or both of these licenses. 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vss-rust-client 2 | Client-side library to interact with Versioned Storage Service (VSS). 3 | 4 | VSS is an open-source project designed to offer a server-side cloud storage solution specifically 5 | tailored for noncustodial Lightning supporting mobile wallets. Its primary objective is to 6 | simplify the development process for Lightning wallets by providing a secure means to store 7 | and manage the essential state required for Lightning Network (LN) operations. 8 | 9 | Learn more [here](https://github.com/lightningdevkit/vss-server/blob/main/README.md). 10 | 11 | ## MSRV 12 | The Minimum Supported Rust Version (MSRV) is currently 1.63.0. 
-------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | #[cfg(genproto)] 2 | extern crate prost_build; 3 | #[cfg(genproto)] 4 | use std::{env, fs, fs::File, path::Path}; 5 | 6 | /// To generate updated proto objects: 7 | /// 1. Place `vss.proto` file in `src/proto/` 8 | /// 2. run `RUSTFLAGS="--cfg genproto" cargo build` 9 | fn main() { 10 | #[cfg(genproto)] 11 | generate_protos(); 12 | } 13 | 14 | #[cfg(genproto)] 15 | fn generate_protos() { 16 | download_file( 17 | "https://raw.githubusercontent.com/lightningdevkit/vss-server/7f492fcac0c561b212f49ca40f7d16075822440f/app/src/main/proto/vss.proto", 18 | "src/proto/vss.proto", 19 | ).unwrap(); 20 | 21 | prost_build::compile_protos(&["src/proto/vss.proto"], &["src/"]).unwrap(); 22 | let from_path = Path::new(&env::var("OUT_DIR").unwrap()).join("vss.rs"); 23 | fs::copy(from_path, "src/types.rs").unwrap(); 24 | } 25 | 26 | #[cfg(genproto)] 27 | fn download_file(url: &str, save_to: &str) -> Result<(), Box> { 28 | let mut response = reqwest::blocking::get(url)?; 29 | fs::create_dir_all(Path::new(save_to).parent().unwrap())?; 30 | let mut out_file = File::create(save_to)?; 31 | response.copy_to(&mut out_file)?; 32 | Ok(()) 33 | } 34 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | use_small_heuristics = "Max" 2 | fn_params_layout = "Compressed" 3 | hard_tabs = true 4 | use_field_init_shorthand = true 5 | max_width = 100 6 | match_block_trailing_comma = true 7 | # UNSTABLE: format_code_in_doc_comments = true 8 | # UNSTABLE: overflow_delimited_expr = true 9 | # UNSTABLE: comment_width = 100 10 | # UNSTABLE: format_macro_matchers = true 11 | # UNSTABLE: format_strings = true 12 | # UNSTABLE: group_imports = "StdExternalCrate" 13 | 
-------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use prost::Message; 2 | use reqwest::header::CONTENT_TYPE; 3 | use reqwest::Client; 4 | use std::collections::HashMap; 5 | use std::default::Default; 6 | use std::sync::Arc; 7 | 8 | use crate::error::VssError; 9 | use crate::headers::{get_headermap, FixedHeaders, VssHeaderProvider}; 10 | use crate::types::{ 11 | DeleteObjectRequest, DeleteObjectResponse, GetObjectRequest, GetObjectResponse, 12 | ListKeyVersionsRequest, ListKeyVersionsResponse, PutObjectRequest, PutObjectResponse, 13 | }; 14 | use crate::util::retry::{retry, RetryPolicy}; 15 | 16 | const APPLICATION_OCTET_STREAM: &str = "application/octet-stream"; 17 | 18 | /// Thin-client to access a hosted instance of Versioned Storage Service (VSS). 19 | /// The provided [`VssClient`] API is minimalistic and is congruent to the VSS server-side API. 20 | #[derive(Clone)] 21 | pub struct VssClient 22 | where 23 | R: RetryPolicy, 24 | { 25 | base_url: String, 26 | client: Client, 27 | retry_policy: R, 28 | header_provider: Arc, 29 | } 30 | 31 | impl> VssClient { 32 | /// Constructs a [`VssClient`] using `base_url` as the VSS server endpoint. 33 | pub fn new(base_url: String, retry_policy: R) -> Self { 34 | let client = Client::new(); 35 | Self::from_client(base_url, client, retry_policy) 36 | } 37 | 38 | /// Constructs a [`VssClient`] from a given [`reqwest::Client`], using `base_url` as the VSS server endpoint. 39 | pub fn from_client(base_url: String, client: Client, retry_policy: R) -> Self { 40 | Self { 41 | base_url, 42 | client, 43 | retry_policy, 44 | header_provider: Arc::new(FixedHeaders::new(HashMap::new())), 45 | } 46 | } 47 | 48 | /// Constructs a [`VssClient`] using `base_url` as the VSS server endpoint. 49 | /// 50 | /// HTTP headers will be provided by the given `header_provider`. 
51 | pub fn new_with_headers( 52 | base_url: String, retry_policy: R, header_provider: Arc, 53 | ) -> Self { 54 | let client = Client::new(); 55 | Self { base_url, client, retry_policy, header_provider } 56 | } 57 | 58 | /// Returns the underlying base URL. 59 | pub fn base_url(&self) -> &str { 60 | &self.base_url 61 | } 62 | 63 | /// Fetches a value against a given `key` in `request`. 64 | /// Makes a service call to the `GetObject` endpoint of the VSS server. 65 | /// For API contract/usage, refer to docs for [`GetObjectRequest`] and [`GetObjectResponse`]. 66 | pub async fn get_object( 67 | &self, request: &GetObjectRequest, 68 | ) -> Result { 69 | retry( 70 | || async { 71 | let url = format!("{}/getObject", self.base_url); 72 | self.post_request(request, &url).await.and_then(|response: GetObjectResponse| { 73 | if response.value.is_none() { 74 | Err(VssError::InternalServerError( 75 | "VSS Server API Violation, expected value in GetObjectResponse but found none".to_string(), 76 | )) 77 | } else { 78 | Ok(response) 79 | } 80 | }) 81 | }, 82 | &self.retry_policy, 83 | ) 84 | .await 85 | } 86 | 87 | /// Writes multiple [`PutObjectRequest::transaction_items`] as part of a single transaction. 88 | /// Makes a service call to the `PutObject` endpoint of the VSS server, with multiple items. 89 | /// Items in the `request` are written in a single all-or-nothing transaction. 90 | /// For API contract/usage, refer to docs for [`PutObjectRequest`] and [`PutObjectResponse`]. 91 | pub async fn put_object( 92 | &self, request: &PutObjectRequest, 93 | ) -> Result { 94 | retry( 95 | || async { 96 | let url = format!("{}/putObjects", self.base_url); 97 | self.post_request(request, &url).await 98 | }, 99 | &self.retry_policy, 100 | ) 101 | .await 102 | } 103 | 104 | /// Deletes the given `key` and `value` in `request`. 105 | /// Makes a service call to the `DeleteObject` endpoint of the VSS server. 
106 | /// For API contract/usage, refer to docs for [`DeleteObjectRequest`] and [`DeleteObjectResponse`]. 107 | pub async fn delete_object( 108 | &self, request: &DeleteObjectRequest, 109 | ) -> Result { 110 | retry( 111 | || async { 112 | let url = format!("{}/deleteObject", self.base_url); 113 | self.post_request(request, &url).await 114 | }, 115 | &self.retry_policy, 116 | ) 117 | .await 118 | } 119 | 120 | /// Lists keys and their corresponding version for a given [`ListKeyVersionsRequest::store_id`]. 121 | /// Makes a service call to the `ListKeyVersions` endpoint of the VSS server. 122 | /// For API contract/usage, refer to docs for [`ListKeyVersionsRequest`] and [`ListKeyVersionsResponse`]. 123 | pub async fn list_key_versions( 124 | &self, request: &ListKeyVersionsRequest, 125 | ) -> Result { 126 | retry( 127 | || async { 128 | let url = format!("{}/listKeyVersions", self.base_url); 129 | self.post_request(request, &url).await 130 | }, 131 | &self.retry_policy, 132 | ) 133 | .await 134 | } 135 | 136 | async fn post_request( 137 | &self, request: &Rq, url: &str, 138 | ) -> Result { 139 | let request_body = request.encode_to_vec(); 140 | let headermap = self 141 | .header_provider 142 | .get_headers(&request_body) 143 | .await 144 | .and_then(|h| get_headermap(&h)) 145 | .map_err(|e| VssError::AuthError(e.to_string()))?; 146 | let response_raw = self 147 | .client 148 | .post(url) 149 | .header(CONTENT_TYPE, APPLICATION_OCTET_STREAM) 150 | .headers(headermap) 151 | .body(request_body) 152 | .send() 153 | .await?; 154 | let status = response_raw.status(); 155 | let payload = response_raw.bytes().await?; 156 | 157 | if status.is_success() { 158 | let response = Rs::decode(&payload[..])?; 159 | Ok(response) 160 | } else { 161 | Err(VssError::new(status, payload)) 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/crypto/chacha20.rs: 
-------------------------------------------------------------------------------- 1 | // This file was stolen from rust-crypto. 2 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT 3 | // file at the top-level directory of this distribution and at 4 | // http://rust-lang.org/COPYRIGHT. 5 | // 6 | // This file is licensed under the Apache License, Version 2.0 or the MIT license 8 | // , at your option. 9 | // You may not use this file except in accordance with one or both of these 10 | // licenses. 11 | 12 | mod real_chacha { 13 | use core::cmp; 14 | use core::convert::TryInto; 15 | 16 | #[derive(Clone, Copy, PartialEq, Eq)] 17 | #[allow(non_camel_case_types)] 18 | struct u32x4(pub u32, pub u32, pub u32, pub u32); 19 | 20 | impl ::core::ops::Add for u32x4 { 21 | type Output = u32x4; 22 | fn add(self, rhs: u32x4) -> u32x4 { 23 | u32x4( 24 | self.0.wrapping_add(rhs.0), 25 | self.1.wrapping_add(rhs.1), 26 | self.2.wrapping_add(rhs.2), 27 | self.3.wrapping_add(rhs.3), 28 | ) 29 | } 30 | } 31 | 32 | impl ::core::ops::Sub for u32x4 { 33 | type Output = u32x4; 34 | fn sub(self, rhs: u32x4) -> u32x4 { 35 | u32x4( 36 | self.0.wrapping_sub(rhs.0), 37 | self.1.wrapping_sub(rhs.1), 38 | self.2.wrapping_sub(rhs.2), 39 | self.3.wrapping_sub(rhs.3), 40 | ) 41 | } 42 | } 43 | 44 | impl ::core::ops::BitXor for u32x4 { 45 | type Output = u32x4; 46 | fn bitxor(self, rhs: u32x4) -> u32x4 { 47 | u32x4(self.0 ^ rhs.0, self.1 ^ rhs.1, self.2 ^ rhs.2, self.3 ^ rhs.3) 48 | } 49 | } 50 | 51 | impl ::core::ops::Shr for u32x4 { 52 | type Output = u32x4; 53 | fn shr(self, rhs: u32x4) -> u32x4 { 54 | u32x4(self.0 >> rhs.0, self.1 >> rhs.1, self.2 >> rhs.2, self.3 >> rhs.3) 55 | } 56 | } 57 | 58 | impl ::core::ops::Shl for u32x4 { 59 | type Output = u32x4; 60 | fn shl(self, rhs: u32x4) -> u32x4 { 61 | u32x4(self.0 << rhs.0, self.1 << rhs.1, self.2 << rhs.2, self.3 << rhs.3) 62 | } 63 | } 64 | 65 | impl u32x4 { 66 | fn from_bytes(bytes: &[u8]) -> Self { 67 | 
assert_eq!(bytes.len(), 4 * 4); 68 | Self( 69 | u32::from_le_bytes(bytes[0 * 4..1 * 4].try_into().expect("len is 4")), 70 | u32::from_le_bytes(bytes[1 * 4..2 * 4].try_into().expect("len is 4")), 71 | u32::from_le_bytes(bytes[2 * 4..3 * 4].try_into().expect("len is 4")), 72 | u32::from_le_bytes(bytes[3 * 4..4 * 4].try_into().expect("len is 4")), 73 | ) 74 | } 75 | } 76 | 77 | const BLOCK_SIZE: usize = 64; 78 | 79 | #[derive(Clone, Copy)] 80 | struct ChaChaState { 81 | a: u32x4, 82 | b: u32x4, 83 | c: u32x4, 84 | d: u32x4, 85 | } 86 | 87 | #[derive(Copy)] 88 | pub struct ChaCha20 { 89 | state: ChaChaState, 90 | output: [u8; BLOCK_SIZE], 91 | offset: usize, 92 | } 93 | 94 | impl Clone for ChaCha20 { 95 | fn clone(&self) -> ChaCha20 { 96 | *self 97 | } 98 | } 99 | 100 | macro_rules! swizzle { 101 | ($b: expr, $c: expr, $d: expr) => {{ 102 | let u32x4(b10, b11, b12, b13) = $b; 103 | $b = u32x4(b11, b12, b13, b10); 104 | let u32x4(c10, c11, c12, c13) = $c; 105 | $c = u32x4(c12, c13, c10, c11); 106 | let u32x4(d10, d11, d12, d13) = $d; 107 | $d = u32x4(d13, d10, d11, d12); 108 | }}; 109 | } 110 | 111 | macro_rules! state_to_buffer { 112 | ($state: expr, $output: expr) => {{ 113 | let u32x4(a1, a2, a3, a4) = $state.a; 114 | let u32x4(b1, b2, b3, b4) = $state.b; 115 | let u32x4(c1, c2, c3, c4) = $state.c; 116 | let u32x4(d1, d2, d3, d4) = $state.d; 117 | let lens = [a1, a2, a3, a4, b1, b2, b3, b4, c1, c2, c3, c4, d1, d2, d3, d4]; 118 | for i in 0..lens.len() { 119 | $output[i * 4..(i + 1) * 4].copy_from_slice(&lens[i].to_le_bytes()); 120 | } 121 | }}; 122 | } 123 | 124 | macro_rules! round { 125 | ($state: expr) => {{ 126 | $state.a = $state.a + $state.b; 127 | rotate!($state.d, $state.a, S16); 128 | $state.c = $state.c + $state.d; 129 | rotate!($state.b, $state.c, S12); 130 | $state.a = $state.a + $state.b; 131 | rotate!($state.d, $state.a, S8); 132 | $state.c = $state.c + $state.d; 133 | rotate!($state.b, $state.c, S7); 134 | }}; 135 | } 136 | 137 | macro_rules! 
rotate { 138 | ($a: expr, $b: expr, $c:expr) => {{ 139 | let v = $a ^ $b; 140 | let r = S32 - $c; 141 | let right = v >> r; 142 | $a = (v << $c) ^ right 143 | }}; 144 | } 145 | 146 | const S32: u32x4 = u32x4(32, 32, 32, 32); 147 | const S16: u32x4 = u32x4(16, 16, 16, 16); 148 | const S12: u32x4 = u32x4(12, 12, 12, 12); 149 | const S8: u32x4 = u32x4(8, 8, 8, 8); 150 | const S7: u32x4 = u32x4(7, 7, 7, 7); 151 | 152 | impl ChaCha20 { 153 | pub fn new(key: &[u8], nonce: &[u8]) -> ChaCha20 { 154 | assert!(key.len() == 16 || key.len() == 32); 155 | assert!(nonce.len() == 8 || nonce.len() == 12); 156 | 157 | ChaCha20 { state: ChaCha20::expand(key, nonce), output: [0u8; BLOCK_SIZE], offset: 64 } 158 | } 159 | 160 | fn expand(key: &[u8], nonce: &[u8]) -> ChaChaState { 161 | let constant = match key.len() { 162 | 16 => b"expand 16-byte k", 163 | 32 => b"expand 32-byte k", 164 | _ => unreachable!(), 165 | }; 166 | ChaChaState { 167 | a: u32x4::from_bytes(&constant[0..16]), 168 | b: u32x4::from_bytes(&key[0..16]), 169 | c: if key.len() == 16 { 170 | u32x4::from_bytes(&key[0..16]) 171 | } else { 172 | u32x4::from_bytes(&key[16..32]) 173 | }, 174 | d: if nonce.len() == 16 { 175 | u32x4::from_bytes(&nonce[0..16]) 176 | } else if nonce.len() == 12 { 177 | let mut nonce4 = [0; 4 * 4]; 178 | nonce4[4..].copy_from_slice(nonce); 179 | u32x4::from_bytes(&nonce4) 180 | } else { 181 | let mut nonce4 = [0; 4 * 4]; 182 | nonce4[8..].copy_from_slice(nonce); 183 | u32x4::from_bytes(&nonce4) 184 | }, 185 | } 186 | } 187 | 188 | // put the the next BLOCK_SIZE keystream bytes into self.output 189 | fn update(&mut self) { 190 | let mut state = self.state; 191 | 192 | for _ in 0..10 { 193 | round!(state); 194 | swizzle!(state.b, state.c, state.d); 195 | round!(state); 196 | swizzle!(state.d, state.c, state.b); 197 | } 198 | state.a = state.a + self.state.a; 199 | state.b = state.b + self.state.b; 200 | state.c = state.c + self.state.c; 201 | state.d = state.d + self.state.d; 202 | 203 | 
state_to_buffer!(state, self.output); 204 | 205 | self.state.d = self.state.d + u32x4(1, 0, 0, 0); 206 | let u32x4(c12, _, _, _) = self.state.d; 207 | if c12 == 0 { 208 | // we could increment the other counter word with an 8 byte nonce 209 | // but other implementations like boringssl have this same 210 | // limitation 211 | panic!("counter is exhausted"); 212 | } 213 | 214 | self.offset = 0; 215 | } 216 | 217 | #[inline] // Useful cause input may be 0s on stack that should be optimized out 218 | pub fn process(&mut self, input: &[u8], output: &mut [u8]) { 219 | assert!(input.len() == output.len()); 220 | let len = input.len(); 221 | let mut i = 0; 222 | while i < len { 223 | // If there is no keystream available in the output buffer, 224 | // generate the next block. 225 | if self.offset == BLOCK_SIZE { 226 | self.update(); 227 | } 228 | 229 | // Process the min(available keystream, remaining input length). 230 | let count = cmp::min(BLOCK_SIZE - self.offset, len - i); 231 | // explicitly assert lengths to avoid bounds checks: 232 | assert!(output.len() >= i + count); 233 | assert!(input.len() >= i + count); 234 | assert!(self.output.len() >= self.offset + count); 235 | for j in 0..count { 236 | output[i + j] = input[i + j] ^ self.output[self.offset + j]; 237 | } 238 | i += count; 239 | self.offset += count; 240 | } 241 | } 242 | 243 | pub fn process_in_place(&mut self, input_output: &mut [u8]) { 244 | let len = input_output.len(); 245 | let mut i = 0; 246 | while i < len { 247 | // If there is no keystream available in the output buffer, 248 | // generate the next block. 249 | if self.offset == BLOCK_SIZE { 250 | self.update(); 251 | } 252 | 253 | // Process the min(available keystream, remaining input length). 
254 | let count = cmp::min(BLOCK_SIZE - self.offset, len - i); 255 | // explicitly assert lengths to avoid bounds checks: 256 | assert!(input_output.len() >= i + count); 257 | assert!(self.output.len() >= self.offset + count); 258 | for j in 0..count { 259 | input_output[i + j] ^= self.output[self.offset + j]; 260 | } 261 | i += count; 262 | self.offset += count; 263 | } 264 | } 265 | } 266 | } 267 | 268 | pub use self::real_chacha::ChaCha20; 269 | 270 | #[cfg(test)] 271 | mod test { 272 | use core::iter::repeat; 273 | 274 | use super::ChaCha20; 275 | 276 | #[test] 277 | fn test_chacha20_256_tls_vectors() { 278 | struct TestVector { 279 | key: [u8; 32], 280 | nonce: [u8; 8], 281 | keystream: Vec, 282 | } 283 | // taken from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04 284 | let test_vectors = vec![ 285 | TestVector { 286 | key: [ 287 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 288 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 289 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 290 | ], 291 | nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 292 | keystream: vec![ 293 | 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90, 0x40, 0x5d, 0x6a, 0xe5, 0x53, 294 | 0x86, 0xbd, 0x28, 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, 0xa8, 0x36, 295 | 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7, 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 296 | 0x8d, 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37, 0x6a, 0x43, 0xb8, 0xf4, 297 | 0x15, 0x18, 0xa1, 0x1c, 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86, 298 | ], 299 | }, 300 | TestVector { 301 | key: [ 302 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 303 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 304 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 305 | ], 306 | nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 307 | keystream: vec![ 308 | 0x45, 0x40, 0xf0, 0x5a, 0x9f, 0x1f, 0xb2, 0x96, 0xd7, 0x73, 
0x6e, 0x7b, 0x20, 309 | 0x8e, 0x3c, 0x96, 0xeb, 0x4f, 0xe1, 0x83, 0x46, 0x88, 0xd2, 0x60, 0x4f, 0x45, 310 | 0x09, 0x52, 0xed, 0x43, 0x2d, 0x41, 0xbb, 0xe2, 0xa0, 0xb6, 0xea, 0x75, 0x66, 311 | 0xd2, 0xa5, 0xd1, 0xe7, 0xe2, 0x0d, 0x42, 0xaf, 0x2c, 0x53, 0xd7, 0x92, 0xb1, 312 | 0xc4, 0x3f, 0xea, 0x81, 0x7e, 0x9a, 0xd2, 0x75, 0xae, 0x54, 0x69, 0x63, 313 | ], 314 | }, 315 | TestVector { 316 | key: [ 317 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 318 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 319 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 320 | ], 321 | nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01], 322 | keystream: vec![ 323 | 0xde, 0x9c, 0xba, 0x7b, 0xf3, 0xd6, 0x9e, 0xf5, 0xe7, 0x86, 0xdc, 0x63, 0x97, 324 | 0x3f, 0x65, 0x3a, 0x0b, 0x49, 0xe0, 0x15, 0xad, 0xbf, 0xf7, 0x13, 0x4f, 0xcb, 325 | 0x7d, 0xf1, 0x37, 0x82, 0x10, 0x31, 0xe8, 0x5a, 0x05, 0x02, 0x78, 0xa7, 0x08, 326 | 0x45, 0x27, 0x21, 0x4f, 0x73, 0xef, 0xc7, 0xfa, 0x5b, 0x52, 0x77, 0x06, 0x2e, 327 | 0xb7, 0xa0, 0x43, 0x3e, 0x44, 0x5f, 0x41, 0xe3, 328 | ], 329 | }, 330 | TestVector { 331 | key: [ 332 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 333 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 334 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 335 | ], 336 | nonce: [0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 337 | keystream: vec![ 338 | 0xef, 0x3f, 0xdf, 0xd6, 0xc6, 0x15, 0x78, 0xfb, 0xf5, 0xcf, 0x35, 0xbd, 0x3d, 339 | 0xd3, 0x3b, 0x80, 0x09, 0x63, 0x16, 0x34, 0xd2, 0x1e, 0x42, 0xac, 0x33, 0x96, 340 | 0x0b, 0xd1, 0x38, 0xe5, 0x0d, 0x32, 0x11, 0x1e, 0x4c, 0xaf, 0x23, 0x7e, 0xe5, 341 | 0x3c, 0xa8, 0xad, 0x64, 0x26, 0x19, 0x4a, 0x88, 0x54, 0x5d, 0xdc, 0x49, 0x7a, 342 | 0x0b, 0x46, 0x6e, 0x7d, 0x6b, 0xbd, 0xb0, 0x04, 0x1b, 0x2f, 0x58, 0x6b, 343 | ], 344 | }, 345 | TestVector { 346 | key: [ 347 | 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 348 
| 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 349 | 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 350 | ], 351 | nonce: [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07], 352 | keystream: vec![ 353 | 0xf7, 0x98, 0xa1, 0x89, 0xf1, 0x95, 0xe6, 0x69, 0x82, 0x10, 0x5f, 0xfb, 0x64, 354 | 0x0b, 0xb7, 0x75, 0x7f, 0x57, 0x9d, 0xa3, 0x16, 0x02, 0xfc, 0x93, 0xec, 0x01, 355 | 0xac, 0x56, 0xf8, 0x5a, 0xc3, 0xc1, 0x34, 0xa4, 0x54, 0x7b, 0x73, 0x3b, 0x46, 356 | 0x41, 0x30, 0x42, 0xc9, 0x44, 0x00, 0x49, 0x17, 0x69, 0x05, 0xd3, 0xbe, 0x59, 357 | 0xea, 0x1c, 0x53, 0xf1, 0x59, 0x16, 0x15, 0x5c, 0x2b, 0xe8, 0x24, 0x1a, 0x38, 358 | 0x00, 0x8b, 0x9a, 0x26, 0xbc, 0x35, 0x94, 0x1e, 0x24, 0x44, 0x17, 0x7c, 0x8a, 359 | 0xde, 0x66, 0x89, 0xde, 0x95, 0x26, 0x49, 0x86, 0xd9, 0x58, 0x89, 0xfb, 0x60, 360 | 0xe8, 0x46, 0x29, 0xc9, 0xbd, 0x9a, 0x5a, 0xcb, 0x1c, 0xc1, 0x18, 0xbe, 0x56, 361 | 0x3e, 0xb9, 0xb3, 0xa4, 0xa4, 0x72, 0xf8, 0x2e, 0x09, 0xa7, 0xe7, 0x78, 0x49, 362 | 0x2b, 0x56, 0x2e, 0xf7, 0x13, 0x0e, 0x88, 0xdf, 0xe0, 0x31, 0xc7, 0x9d, 0xb9, 363 | 0xd4, 0xf7, 0xc7, 0xa8, 0x99, 0x15, 0x1b, 0x9a, 0x47, 0x50, 0x32, 0xb6, 0x3f, 364 | 0xc3, 0x85, 0x24, 0x5f, 0xe0, 0x54, 0xe3, 0xdd, 0x5a, 0x97, 0xa5, 0xf5, 0x76, 365 | 0xfe, 0x06, 0x40, 0x25, 0xd3, 0xce, 0x04, 0x2c, 0x56, 0x6a, 0xb2, 0xc5, 0x07, 366 | 0xb1, 0x38, 0xdb, 0x85, 0x3e, 0x3d, 0x69, 0x59, 0x66, 0x09, 0x96, 0x54, 0x6c, 367 | 0xc9, 0xc4, 0xa6, 0xea, 0xfd, 0xc7, 0x77, 0xc0, 0x40, 0xd7, 0x0e, 0xaf, 0x46, 368 | 0xf7, 0x6d, 0xad, 0x39, 0x79, 0xe5, 0xc5, 0x36, 0x0c, 0x33, 0x17, 0x16, 0x6a, 369 | 0x1c, 0x89, 0x4c, 0x94, 0xa3, 0x71, 0x87, 0x6a, 0x94, 0xdf, 0x76, 0x28, 0xfe, 370 | 0x4e, 0xaa, 0xf2, 0xcc, 0xb2, 0x7d, 0x5a, 0xaa, 0xe0, 0xad, 0x7a, 0xd0, 0xf9, 371 | 0xd4, 0xb6, 0xad, 0x3b, 0x54, 0x09, 0x87, 0x46, 0xd4, 0x52, 0x4d, 0x38, 0x40, 372 | 0x7a, 0x6d, 0xeb, 0x3a, 0xb7, 0x8f, 0xab, 0x78, 0xc9, 373 | ], 374 | }, 375 | ]; 376 | 377 | for tv in test_vectors.iter() { 378 | let mut c = ChaCha20::new(&tv.key, &tv.nonce); 
379 | let input: Vec = repeat(0).take(tv.keystream.len()).collect(); 380 | let mut output: Vec = repeat(0).take(input.len()).collect(); 381 | c.process(&input[..], &mut output[..]); 382 | assert_eq!(output, tv.keystream); 383 | } 384 | } 385 | 386 | #[test] 387 | fn test_chacha20_256_tls_vectors_96_nonce() { 388 | struct TestVector { 389 | key: [u8; 32], 390 | nonce: [u8; 12], 391 | keystream: Vec, 392 | } 393 | // taken from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04 394 | let test_vectors = vec![ 395 | TestVector { 396 | key: [ 397 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 398 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 399 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 400 | ], 401 | nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 402 | keystream: vec![ 403 | 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90, 0x40, 0x5d, 0x6a, 0xe5, 0x53, 404 | 0x86, 0xbd, 0x28, 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, 0xa8, 0x36, 405 | 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7, 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 406 | 0x8d, 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37, 0x6a, 0x43, 0xb8, 0xf4, 407 | 0x15, 0x18, 0xa1, 0x1c, 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86, 408 | ], 409 | }, 410 | TestVector { 411 | key: [ 412 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 413 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 414 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 415 | ], 416 | nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 417 | keystream: vec![ 418 | 0x45, 0x40, 0xf0, 0x5a, 0x9f, 0x1f, 0xb2, 0x96, 0xd7, 0x73, 0x6e, 0x7b, 0x20, 419 | 0x8e, 0x3c, 0x96, 0xeb, 0x4f, 0xe1, 0x83, 0x46, 0x88, 0xd2, 0x60, 0x4f, 0x45, 420 | 0x09, 0x52, 0xed, 0x43, 0x2d, 0x41, 0xbb, 0xe2, 0xa0, 0xb6, 0xea, 0x75, 0x66, 421 | 0xd2, 0xa5, 0xd1, 0xe7, 0xe2, 0x0d, 0x42, 0xaf, 0x2c, 0x53, 0xd7, 
0x92, 0xb1, 422 | 0xc4, 0x3f, 0xea, 0x81, 0x7e, 0x9a, 0xd2, 0x75, 0xae, 0x54, 0x69, 0x63, 423 | ], 424 | }, 425 | TestVector { 426 | key: [ 427 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 428 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 429 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 430 | ], 431 | nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01], 432 | keystream: vec![ 433 | 0xde, 0x9c, 0xba, 0x7b, 0xf3, 0xd6, 0x9e, 0xf5, 0xe7, 0x86, 0xdc, 0x63, 0x97, 434 | 0x3f, 0x65, 0x3a, 0x0b, 0x49, 0xe0, 0x15, 0xad, 0xbf, 0xf7, 0x13, 0x4f, 0xcb, 435 | 0x7d, 0xf1, 0x37, 0x82, 0x10, 0x31, 0xe8, 0x5a, 0x05, 0x02, 0x78, 0xa7, 0x08, 436 | 0x45, 0x27, 0x21, 0x4f, 0x73, 0xef, 0xc7, 0xfa, 0x5b, 0x52, 0x77, 0x06, 0x2e, 437 | 0xb7, 0xa0, 0x43, 0x3e, 0x44, 0x5f, 0x41, 0xe3, 438 | ], 439 | }, 440 | TestVector { 441 | key: [ 442 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 443 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 444 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 445 | ], 446 | nonce: [0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], 447 | keystream: vec![ 448 | 0xef, 0x3f, 0xdf, 0xd6, 0xc6, 0x15, 0x78, 0xfb, 0xf5, 0xcf, 0x35, 0xbd, 0x3d, 449 | 0xd3, 0x3b, 0x80, 0x09, 0x63, 0x16, 0x34, 0xd2, 0x1e, 0x42, 0xac, 0x33, 0x96, 450 | 0x0b, 0xd1, 0x38, 0xe5, 0x0d, 0x32, 0x11, 0x1e, 0x4c, 0xaf, 0x23, 0x7e, 0xe5, 451 | 0x3c, 0xa8, 0xad, 0x64, 0x26, 0x19, 0x4a, 0x88, 0x54, 0x5d, 0xdc, 0x49, 0x7a, 452 | 0x0b, 0x46, 0x6e, 0x7d, 0x6b, 0xbd, 0xb0, 0x04, 0x1b, 0x2f, 0x58, 0x6b, 453 | ], 454 | }, 455 | TestVector { 456 | key: [ 457 | 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 458 | 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 459 | 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 460 | ], 461 | nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 
0x06, 0x07], 462 | keystream: vec![ 463 | 0xf7, 0x98, 0xa1, 0x89, 0xf1, 0x95, 0xe6, 0x69, 0x82, 0x10, 0x5f, 0xfb, 0x64, 464 | 0x0b, 0xb7, 0x75, 0x7f, 0x57, 0x9d, 0xa3, 0x16, 0x02, 0xfc, 0x93, 0xec, 0x01, 465 | 0xac, 0x56, 0xf8, 0x5a, 0xc3, 0xc1, 0x34, 0xa4, 0x54, 0x7b, 0x73, 0x3b, 0x46, 466 | 0x41, 0x30, 0x42, 0xc9, 0x44, 0x00, 0x49, 0x17, 0x69, 0x05, 0xd3, 0xbe, 0x59, 467 | 0xea, 0x1c, 0x53, 0xf1, 0x59, 0x16, 0x15, 0x5c, 0x2b, 0xe8, 0x24, 0x1a, 0x38, 468 | 0x00, 0x8b, 0x9a, 0x26, 0xbc, 0x35, 0x94, 0x1e, 0x24, 0x44, 0x17, 0x7c, 0x8a, 469 | 0xde, 0x66, 0x89, 0xde, 0x95, 0x26, 0x49, 0x86, 0xd9, 0x58, 0x89, 0xfb, 0x60, 470 | 0xe8, 0x46, 0x29, 0xc9, 0xbd, 0x9a, 0x5a, 0xcb, 0x1c, 0xc1, 0x18, 0xbe, 0x56, 471 | 0x3e, 0xb9, 0xb3, 0xa4, 0xa4, 0x72, 0xf8, 0x2e, 0x09, 0xa7, 0xe7, 0x78, 0x49, 472 | 0x2b, 0x56, 0x2e, 0xf7, 0x13, 0x0e, 0x88, 0xdf, 0xe0, 0x31, 0xc7, 0x9d, 0xb9, 473 | 0xd4, 0xf7, 0xc7, 0xa8, 0x99, 0x15, 0x1b, 0x9a, 0x47, 0x50, 0x32, 0xb6, 0x3f, 474 | 0xc3, 0x85, 0x24, 0x5f, 0xe0, 0x54, 0xe3, 0xdd, 0x5a, 0x97, 0xa5, 0xf5, 0x76, 475 | 0xfe, 0x06, 0x40, 0x25, 0xd3, 0xce, 0x04, 0x2c, 0x56, 0x6a, 0xb2, 0xc5, 0x07, 476 | 0xb1, 0x38, 0xdb, 0x85, 0x3e, 0x3d, 0x69, 0x59, 0x66, 0x09, 0x96, 0x54, 0x6c, 477 | 0xc9, 0xc4, 0xa6, 0xea, 0xfd, 0xc7, 0x77, 0xc0, 0x40, 0xd7, 0x0e, 0xaf, 0x46, 478 | 0xf7, 0x6d, 0xad, 0x39, 0x79, 0xe5, 0xc5, 0x36, 0x0c, 0x33, 0x17, 0x16, 0x6a, 479 | 0x1c, 0x89, 0x4c, 0x94, 0xa3, 0x71, 0x87, 0x6a, 0x94, 0xdf, 0x76, 0x28, 0xfe, 480 | 0x4e, 0xaa, 0xf2, 0xcc, 0xb2, 0x7d, 0x5a, 0xaa, 0xe0, 0xad, 0x7a, 0xd0, 0xf9, 481 | 0xd4, 0xb6, 0xad, 0x3b, 0x54, 0x09, 0x87, 0x46, 0xd4, 0x52, 0x4d, 0x38, 0x40, 482 | 0x7a, 0x6d, 0xeb, 0x3a, 0xb7, 0x8f, 0xab, 0x78, 0xc9, 483 | ], 484 | }, 485 | ]; 486 | 487 | for tv in test_vectors.iter() { 488 | let mut c = ChaCha20::new(&tv.key, &tv.nonce); 489 | let input: Vec = repeat(0).take(tv.keystream.len()).collect(); 490 | let mut output: Vec = repeat(0).take(input.len()).collect(); 491 | c.process(&input[..], &mut output[..]); 
492 | assert_eq!(output, tv.keystream); 493 | } 494 | } 495 | } 496 | -------------------------------------------------------------------------------- /src/crypto/chacha20poly1305.rs: -------------------------------------------------------------------------------- 1 | // ring has a garbage API so its use is avoided, but rust-crypto doesn't have RFC-variant poly1305 2 | // Instead, we steal rust-crypto's implementation and tweak it to match the RFC. 3 | // 4 | // This file is licensed under the Apache License, Version 2.0 or the MIT license 6 | // , at your option. 7 | // You may not use this file except in accordance with one or both of these 8 | // licenses. 9 | // 10 | // This is a port of Andrew Moons poly1305-donna 11 | // https://github.com/floodyberry/poly1305-donna 12 | 13 | mod real_chachapoly { 14 | use crate::crypto::chacha20::ChaCha20; 15 | use crate::crypto::poly1305::Poly1305; 16 | use core::ptr::{read_volatile, write_volatile}; 17 | 18 | #[derive(Clone, Copy)] 19 | pub struct ChaCha20Poly1305 { 20 | cipher: ChaCha20, 21 | mac: Poly1305, 22 | finished: bool, 23 | data_len: usize, 24 | aad_len: u64, 25 | } 26 | 27 | #[allow(dead_code, unused)] 28 | impl ChaCha20Poly1305 { 29 | #[inline] 30 | fn pad_mac_16(mac: &mut Poly1305, len: usize) { 31 | if len % 16 != 0 { 32 | mac.input(&[0; 16][0..16 - (len % 16)]); 33 | } 34 | } 35 | pub fn new(key: &[u8], nonce: &[u8], aad: &[u8]) -> ChaCha20Poly1305 { 36 | assert!(key.len() == 16 || key.len() == 32); 37 | assert!(nonce.len() == 12); 38 | 39 | // Ehh, I'm too lazy to *also* tweak ChaCha20 to make it RFC-compliant 40 | assert!(nonce[0] == 0 && nonce[1] == 0 && nonce[2] == 0 && nonce[3] == 0); 41 | 42 | let mut cipher = ChaCha20::new(key, &nonce[4..]); 43 | let mut mac_key = [0u8; 64]; 44 | let zero_key = [0u8; 64]; 45 | cipher.process(&zero_key, &mut mac_key); 46 | 47 | let mut mac = Poly1305::new(&mac_key[..32]); 48 | mac.input(aad); 49 | ChaCha20Poly1305::pad_mac_16(&mut mac, aad.len()); 50 | 51 | 
ChaCha20Poly1305 { 52 | cipher, 53 | mac, 54 | finished: false, 55 | data_len: 0, 56 | aad_len: aad.len() as u64, 57 | } 58 | } 59 | 60 | pub fn encrypt(&mut self, input: &[u8], output: &mut [u8], out_tag: &mut [u8]) { 61 | assert!(input.len() == output.len()); 62 | assert!(self.finished == false); 63 | self.cipher.process(input, output); 64 | self.data_len += input.len(); 65 | self.mac.input(output); 66 | ChaCha20Poly1305::pad_mac_16(&mut self.mac, self.data_len); 67 | self.finished = true; 68 | self.mac.input(&self.aad_len.to_le_bytes()); 69 | self.mac.input(&(self.data_len as u64).to_le_bytes()); 70 | self.mac.raw_result(out_tag); 71 | } 72 | 73 | pub fn encrypt_inplace(&mut self, input_output: &mut [u8], out_tag: &mut [u8]) { 74 | assert!(self.finished == false); 75 | self.encrypt_in_place(input_output); 76 | self.finish_and_get_tag(out_tag); 77 | } 78 | 79 | pub fn decrypt_inplace(&mut self, input_output: &mut [u8], tag: &[u8]) -> Result<(), ()> { 80 | assert!(self.finished == false); 81 | self.decrypt_in_place(input_output); 82 | if self.finish_and_check_tag(tag) { 83 | Ok(()) 84 | } else { 85 | Err(()) 86 | } 87 | } 88 | 89 | // Encrypt `input_output` in-place. To finish and calculate the tag, use `finish_and_get_tag` 90 | // below. 91 | pub(super) fn encrypt_in_place(&mut self, input_output: &mut [u8]) { 92 | debug_assert!(self.finished == false); 93 | self.cipher.process_in_place(input_output); 94 | self.data_len += input_output.len(); 95 | self.mac.input(input_output); 96 | } 97 | 98 | // If we were previously encrypting with `encrypt_in_place`, this method can be used to finish 99 | // encrypting and calculate the tag. 
100 | pub(super) fn finish_and_get_tag(&mut self, out_tag: &mut [u8]) { 101 | debug_assert!(self.finished == false); 102 | ChaCha20Poly1305::pad_mac_16(&mut self.mac, self.data_len); 103 | self.finished = true; 104 | self.mac.input(&self.aad_len.to_le_bytes()); 105 | self.mac.input(&(self.data_len as u64).to_le_bytes()); 106 | self.mac.raw_result(out_tag); 107 | } 108 | 109 | pub fn decrypt(&mut self, input: &[u8], output: &mut [u8], tag: &[u8]) -> bool { 110 | assert!(input.len() == output.len()); 111 | assert!(self.finished == false); 112 | 113 | self.finished = true; 114 | 115 | self.mac.input(input); 116 | 117 | self.data_len += input.len(); 118 | ChaCha20Poly1305::pad_mac_16(&mut self.mac, self.data_len); 119 | self.mac.input(&self.aad_len.to_le_bytes()); 120 | self.mac.input(&(self.data_len as u64).to_le_bytes()); 121 | 122 | let mut calc_tag = [0u8; 16]; 123 | self.mac.raw_result(&mut calc_tag); 124 | if ChaCha20Poly1305::fixed_time_eq(&calc_tag, tag) { 125 | self.cipher.process(input, output); 126 | true 127 | } else { 128 | false 129 | } 130 | } 131 | 132 | // Decrypt in place, without checking the tag. Use `finish_and_check_tag` to check it 133 | // later when decryption finishes. 134 | // 135 | // Should never be `pub` because the public API should always enforce tag checking. 136 | pub(super) fn decrypt_in_place(&mut self, input_output: &mut [u8]) { 137 | debug_assert!(self.finished == false); 138 | self.mac.input(input_output); 139 | self.data_len += input_output.len(); 140 | self.cipher.process_in_place(input_output); 141 | } 142 | 143 | // If we were previously decrypting with `decrypt_in_place`, this method must be used to finish 144 | // decrypting and check the tag. Returns whether the tag is valid. 
145 | pub(super) fn finish_and_check_tag(&mut self, tag: &[u8]) -> bool { 146 | debug_assert!(self.finished == false); 147 | self.finished = true; 148 | ChaCha20Poly1305::pad_mac_16(&mut self.mac, self.data_len); 149 | self.mac.input(&self.aad_len.to_le_bytes()); 150 | self.mac.input(&(self.data_len as u64).to_le_bytes()); 151 | 152 | let mut calc_tag = [0u8; 16]; 153 | self.mac.raw_result(&mut calc_tag); 154 | if ChaCha20Poly1305::fixed_time_eq(&calc_tag, tag) { 155 | true 156 | } else { 157 | false 158 | } 159 | } 160 | pub(super) fn fixed_time_eq(a: &[u8], b: &[u8]) -> bool { 161 | assert!(a.len() == b.len()); 162 | let count = a.len(); 163 | let lhs = &a[..count]; 164 | let rhs = &b[..count]; 165 | 166 | let mut r: u8 = 0; 167 | for i in 0..count { 168 | let mut rs = unsafe { read_volatile(&r) }; 169 | rs |= lhs[i] ^ rhs[i]; 170 | unsafe { 171 | write_volatile(&mut r, rs); 172 | } 173 | } 174 | { 175 | let mut t = unsafe { read_volatile(&r) }; 176 | t |= t >> 4; 177 | unsafe { 178 | write_volatile(&mut r, t); 179 | } 180 | } 181 | { 182 | let mut t = unsafe { read_volatile(&r) }; 183 | t |= t >> 2; 184 | unsafe { 185 | write_volatile(&mut r, t); 186 | } 187 | } 188 | { 189 | let mut t = unsafe { read_volatile(&r) }; 190 | t |= t >> 1; 191 | unsafe { 192 | write_volatile(&mut r, t); 193 | } 194 | } 195 | unsafe { (read_volatile(&r) & 1) == 0 } 196 | } 197 | } 198 | } 199 | 200 | pub use self::real_chachapoly::ChaCha20Poly1305; 201 | -------------------------------------------------------------------------------- /src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod chacha20; 2 | pub(crate) mod chacha20poly1305; 3 | pub(crate) mod poly1305; 4 | -------------------------------------------------------------------------------- /src/crypto/poly1305.rs: -------------------------------------------------------------------------------- 1 | // This file is licensed under the Apache License, Version 2.0 or 
// the MIT license, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

// This is a port of Andrew Moons poly1305-donna
// https://github.com/floodyberry/poly1305-donna

use core::cmp::min;
use core::convert::TryInto;

/// One-shot Poly1305 MAC state.
///
/// The 130-bit accumulator `h` and clamped key half `r` are stored as five
/// 26-bit little-endian limbs (masks of `0x3ffffff` below) so that every limb
/// product fits comfortably in a `u64`.
#[derive(Clone, Copy)]
pub struct Poly1305 {
	// Clamped multiplier `r` (first 16 key bytes), 26-bit limbs.
	r: [u32; 5],
	// Accumulator, 26-bit limbs.
	h: [u32; 5],
	// Key half `s` (last 16 key bytes), added to the final result.
	pad: [u32; 4],
	// Number of bytes currently buffered below a full 16-byte block.
	leftover: usize,
	// Partial-block buffer.
	buffer: [u8; 16],
	// Set once `finish` has run; no further `input` is allowed afterwards.
	finalized: bool,
}

#[allow(dead_code, unused)]
impl Poly1305 {
	/// Initializes the MAC from a 32-byte one-time key: the first half is the
	/// clamped multiplier `r`, the second half the final pad `s`.
	///
	/// Panics if `key` is not exactly 32 bytes.
	pub fn new(key: &[u8]) -> Poly1305 {
		assert!(key.len() == 32);
		let mut poly = Poly1305 {
			r: [0u32; 5],
			h: [0u32; 5],
			pad: [0u32; 4],
			leftover: 0,
			buffer: [0u8; 16],
			finalized: false,
		};

		// r &= 0xffffffc0ffffffc0ffffffc0fffffff
		poly.r[0] = (u32::from_le_bytes(key[0..4].try_into().expect("len is 4"))) & 0x3ffffff;
		poly.r[1] = (u32::from_le_bytes(key[3..7].try_into().expect("len is 4")) >> 2) & 0x3ffff03;
		poly.r[2] = (u32::from_le_bytes(key[6..10].try_into().expect("len is 4")) >> 4) & 0x3ffc0ff;
		poly.r[3] = (u32::from_le_bytes(key[9..13].try_into().expect("len is 4")) >> 6) & 0x3f03fff;
		poly.r[4] =
			(u32::from_le_bytes(key[12..16].try_into().expect("len is 4")) >> 8) & 0x00fffff;

		poly.pad[0] = u32::from_le_bytes(key[16..20].try_into().expect("len is 4"));
		poly.pad[1] = u32::from_le_bytes(key[20..24].try_into().expect("len is 4"));
		poly.pad[2] = u32::from_le_bytes(key[24..28].try_into().expect("len is 4"));
		poly.pad[3] = u32::from_le_bytes(key[28..32].try_into().expect("len is 4"));

		poly
	}

	// Absorb one 16-byte block: h = (h + m) * r, partially reduced mod 2^130 - 5.
	fn block(&mut self, m: &[u8]) {
		// Full blocks carry an implicit high bit (2^128); the final, already
		// length-padded block (absorbed from `finish`) does not.
		let hibit: u32 = if self.finalized { 0 } else { 1 << 24 };

		let r0 = self.r[0];
		let r1 = self.r[1];
		let r2 = self.r[2];
		let r3 = self.r[3];
		let r4 = self.r[4];

		// Precomputed 5*r limbs used by the modular reduction.
		let s1 = r1 * 5;
		let s2 = r2 * 5;
		let s3 = r3 * 5;
		let s4 = r4 * 5;

		let mut h0 = self.h[0];
		let mut h1 = self.h[1];
		let mut h2 = self.h[2];
		let mut h3 = self.h[3];
		let mut h4 = self.h[4];

		// h += m
		h0 += (u32::from_le_bytes(m[0..4].try_into().expect("len is 4"))) & 0x3ffffff;
		h1 += (u32::from_le_bytes(m[3..7].try_into().expect("len is 4")) >> 2) & 0x3ffffff;
		h2 += (u32::from_le_bytes(m[6..10].try_into().expect("len is 4")) >> 4) & 0x3ffffff;
		h3 += (u32::from_le_bytes(m[9..13].try_into().expect("len is 4")) >> 6) & 0x3ffffff;
		h4 += (u32::from_le_bytes(m[12..16].try_into().expect("len is 4")) >> 8) | hibit;

		// h *= r
		let d0 = (h0 as u64 * r0 as u64)
			+ (h1 as u64 * s4 as u64)
			+ (h2 as u64 * s3 as u64)
			+ (h3 as u64 * s2 as u64)
			+ (h4 as u64 * s1 as u64);
		let mut d1 = (h0 as u64 * r1 as u64)
			+ (h1 as u64 * r0 as u64)
			+ (h2 as u64 * s4 as u64)
			+ (h3 as u64 * s3 as u64)
			+ (h4 as u64 * s2 as u64);
		let mut d2 = (h0 as u64 * r2 as u64)
			+ (h1 as u64 * r1 as u64)
			+ (h2 as u64 * r0 as u64)
			+ (h3 as u64 * s4 as u64)
			+ (h4 as u64 * s3 as u64);
		let mut d3 = (h0 as u64 * r3 as u64)
			+ (h1 as u64 * r2 as u64)
			+ (h2 as u64 * r1 as u64)
			+ (h3 as u64 * r0 as u64)
			+ (h4 as u64 * s4 as u64);
		let mut d4 = (h0 as u64 * r4 as u64)
			+ (h1 as u64 * r3 as u64)
			+ (h2 as u64 * r2 as u64)
			+ (h3 as u64 * r1 as u64)
			+ (h4 as u64 * r0 as u64);

		// (partial) h %= p
		let mut c: u32;
		c = (d0 >> 26) as u32;
		h0 = d0 as u32 & 0x3ffffff;
		d1 += c as u64;
		c = (d1 >> 26) as u32;
		h1 = d1 as u32 & 0x3ffffff;
		d2 += c as u64;
		c = (d2 >> 26) as u32;
		h2 = d2 as u32 & 0x3ffffff;
		d3 += c as u64;
		c = (d3 >> 26) as u32;
		h3 = d3 as u32 & 0x3ffffff;
		d4 += c as u64;
		c = (d4 >> 26) as u32;
		h4 = d4 as u32 & 0x3ffffff;
		h0 += c * 5;
		c = h0 >> 26;
		h0 = h0 & 0x3ffffff;
		h1 += c;

		self.h[0] = h0;
		self.h[1] = h1;
		self.h[2] = h2;
		self.h[3] = h3;
		self.h[4] = h4;
	}

	/// Flushes any buffered partial block and reduces `h` to the final 128-bit
	/// MAC value (stored back into `h[0..4]`).
	pub fn finish(&mut self) {
		if self.leftover > 0 {
			// Pad the final partial block: a single 1 byte, then zeros.
			self.buffer[self.leftover] = 1;
			for i in self.leftover + 1..16 {
				self.buffer[i] = 0;
			}
			self.finalized = true;
			let tmp = self.buffer;
			self.block(&tmp);
		}

		// fully carry h
		let mut h0 = self.h[0];
		let mut h1 = self.h[1];
		let mut h2 = self.h[2];
		let mut h3 = self.h[3];
		let mut h4 = self.h[4];

		let mut c: u32;
		c = h1 >> 26;
		h1 = h1 & 0x3ffffff;
		h2 += c;
		c = h2 >> 26;
		h2 = h2 & 0x3ffffff;
		h3 += c;
		c = h3 >> 26;
		h3 = h3 & 0x3ffffff;
		h4 += c;
		c = h4 >> 26;
		h4 = h4 & 0x3ffffff;
		h0 += c * 5;
		c = h0 >> 26;
		h0 = h0 & 0x3ffffff;
		h1 += c;

		// compute h + -p
		let mut g0 = h0.wrapping_add(5);
		c = g0 >> 26;
		g0 &= 0x3ffffff;
		let mut g1 = h1.wrapping_add(c);
		c = g1 >> 26;
		g1 &= 0x3ffffff;
		let mut g2 = h2.wrapping_add(c);
		c = g2 >> 26;
		g2 &= 0x3ffffff;
		let mut g3 = h3.wrapping_add(c);
		c = g3 >> 26;
		g3 &= 0x3ffffff;
		let mut g4 = h4.wrapping_add(c).wrapping_sub(1 << 26);

		// select h if h < p, or h + -p if h >= p
		let mut mask = (g4 >> (32 - 1)).wrapping_sub(1);
		g0 &= mask;
		g1 &= mask;
		g2 &= mask;
		g3 &= mask;
		g4 &= mask;
		mask = !mask;
		h0 = (h0 & mask) | g0;
		h1 = (h1 & mask) | g1;
		h2 = (h2 & mask) | g2;
		h3 = (h3 & mask) | g3;
		h4 = (h4 & mask) | g4;

		// h = h % (2^128)
		h0 = ((h0) | (h1 << 26)) & 0xffffffff;
		h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
		h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
		h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;

		// h = mac = (h + pad) % (2^128)
		let mut f: u64;
		f = h0 as u64 + self.pad[0] as u64;
		h0 = f as u32;
		f = h1 as u64 + self.pad[1] as u64 + (f >> 32);
		h1 = f as u32;
		f = h2 as u64 + self.pad[2] as u64 + (f >> 32);
		h2 = f as u32;
		f = h3 as u64 + self.pad[3] as u64 + (f >> 32);
		h3 = f as u32;

		self.h[0] = h0;
		self.h[1] = h1;
		self.h[2] = h2;
		self.h[3] = h3;
	}

	/// Absorbs message bytes, buffering any trailing partial block.
	///
	/// Panics if called after the MAC has been finalized.
	pub fn input(&mut self, data: &[u8]) {
		assert!(!self.finalized);
		let mut m = data;

		// First top up any previously buffered partial block.
		if self.leftover > 0 {
			let want = min(16 - self.leftover, m.len());
			for i in 0..want {
				self.buffer[self.leftover + i] = m[i];
			}
			m = &m[want..];
			self.leftover += want;

			if self.leftover < 16 {
				return;
			}

			// self.block(self.buffer[..]);
			let tmp = self.buffer;
			self.block(&tmp);

			self.leftover = 0;
		}

		// Absorb all remaining full blocks directly.
		while m.len() >= 16 {
			self.block(&m[0..16]);
			m = &m[16..];
		}

		// Buffer the remainder for the next call (or `finish`).
		for i in 0..m.len() {
			self.buffer[i] = m[i];
		}
		self.leftover = m.len();
	}

	/// Writes the 16-byte tag into `output[0..16]`, finalizing first if needed.
	///
	/// Panics if `output` is shorter than 16 bytes.
	pub fn raw_result(&mut self, output: &mut [u8]) {
		assert!(output.len() >= 16);
		if !self.finalized {
			self.finish();
		}
		output[0..4].copy_from_slice(&self.h[0].to_le_bytes());
		output[4..8].copy_from_slice(&self.h[1].to_le_bytes());
		output[8..12].copy_from_slice(&self.h[2].to_le_bytes());
		output[12..16].copy_from_slice(&self.h[3].to_le_bytes());
	}
}

#[cfg(test)]
mod test {
	use core::iter::repeat;

	use super::Poly1305;

	fn poly1305(key: &[u8], msg: &[u8], mac: &mut [u8]) {
		let mut poly = Poly1305::new(key);
		poly.input(msg);
		poly.raw_result(mac);
	}

	#[test]
	fn test_nacl_vector() {
		let key = [
			0xee, 0xa6, 0xa7, 0x25, 0x1c, 0x1e, 0x72, 0x91, 0x6d, 0x11, 0xc2, 0xcb, 0x21, 0x4d,
			0x3c, 0x25, 0x25, 0x39, 0x12, 0x1d, 0x8e, 0x23, 0x4e, 0x65, 0x2d, 0x65, 0x1f, 0xa4,
			0xc8, 0xcf, 0xf8, 0x80,
		];

		let msg = [
			0x8e, 0x99, 0x3b, 0x9f, 0x48, 0x68, 0x12, 0x73, 0xc2, 0x96, 0x50, 0xba, 0x32, 0xfc,
			0x76, 0xce, 0x48, 0x33, 0x2e, 0xa7, 0x16, 0x4d, 0x96, 0xa4, 0x47, 0x6f, 0xb8, 0xc5,
			0x31, 0xa1, 0x18, 0x6a, 0xc0, 0xdf, 0xc1, 0x7c, 0x98, 0xdc, 0xe8, 0x7b, 0x4d, 0xa7,
			0xf0, 0x11, 0xec, 0x48, 0xc9, 0x72, 0x71, 0xd2, 0xc2, 0x0f, 0x9b, 0x92, 0x8f, 0xe2,
			0x27, 0x0d, 0x6f, 0xb8, 0x63, 0xd5, 0x17, 0x38, 0xb4, 0x8e, 0xee, 0xe3, 0x14, 0xa7,
			0xcc, 0x8a, 0xb9, 0x32, 0x16, 0x45, 0x48, 0xe5, 0x26, 0xae, 0x90, 0x22, 0x43, 0x68,
			0x51, 0x7a, 0xcf, 0xea, 0xbd, 0x6b, 0xb3, 0x73, 0x2b, 0xc0, 0xe9, 0xda, 0x99, 0x83,
			0x2b, 0x61, 0xca, 0x01, 0xb6, 0xde, 0x56, 0x24, 0x4a, 0x9e, 0x88, 0xd5, 0xf9, 0xb3,
			0x79, 0x73, 0xf6, 0x22, 0xa4, 0x3d, 0x14, 0xa6, 0x59, 0x9b, 0x1f, 0x65, 0x4c, 0xb4,
			0x5a, 0x74, 0xe3, 0x55, 0xa5,
		];

		let expected = [
			0xf3, 0xff, 0xc7, 0x70, 0x3f, 0x94, 0x00, 0xe5, 0x2a, 0x7d, 0xfb, 0x4b, 0x3d, 0x33,
			0x05, 0xd9,
		];

		let mut mac = [0u8; 16];
		poly1305(&key, &msg, &mut mac);
		assert_eq!(&mac[..], &expected[..]);

		// The same message fed in irregular chunks must produce the same tag.
		let mut poly = Poly1305::new(&key);
		poly.input(&msg[0..32]);
		poly.input(&msg[32..96]);
		poly.input(&msg[96..112]);
		poly.input(&msg[112..120]);
		poly.input(&msg[120..124]);
		poly.input(&msg[124..126]);
		poly.input(&msg[126..127]);
		poly.input(&msg[127..128]);
		poly.input(&msg[128..129]);
		poly.input(&msg[129..130]);
		poly.input(&msg[130..131]);
		poly.raw_result(&mut mac);
		assert_eq!(&mac[..], &expected[..]);
	}

	#[test]
	fn donna_self_test() {
		let wrap_key = [
			0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
		];

		let wrap_msg = [
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			0xff, 0xff,
		];

		let wrap_mac = [
			0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00,
		];

		let mut mac = [0u8; 16];
		poly1305(&wrap_key, &wrap_msg, &mut mac);
		assert_eq!(&mac[..], &wrap_mac[..]);

		let total_key = [
			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9,
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			0x00, 0x00, 0x00, 0x00,
		];

		let total_mac = [
			0x64, 0xaf, 0xe2, 0xe8, 0xd6, 0xad, 0x7b, 0xbd, 0xd2, 0x87, 0xf9, 0x7c, 0x44, 0x62,
			0x3d, 0x39,
		];

		// MAC 256 messages of increasing length, then MAC the concatenated tags.
		let mut tpoly = Poly1305::new(&total_key);
		for i in 0..256 {
			let key: Vec<u8> = repeat(i as u8).take(32).collect();
			let msg: Vec<u8> = repeat(i as u8).take(256).collect();
			let mut mac = [0u8; 16];
			poly1305(&key[..], &msg[0..i], &mut mac);
			tpoly.input(&mac);
		}
		tpoly.raw_result(&mut mac);
		assert_eq!(&mac[..], &total_mac[..]);
	}

	#[test]
	fn test_tls_vectors() {
		// from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
		let key = b"this is 32-byte key for Poly1305";
		let msg = [0u8; 32];
		let expected = [
			0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc,
			0x03, 0x07,
		];
		let mut mac = [0u8; 16];
		poly1305(key, &msg, &mut mac);
		assert_eq!(&mac[..], &expected[..]);

		let msg = b"Hello world!";
		let expected = [
			0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2,
			0xb2, 0xf0,
		];
		poly1305(key, msg, &mut mac);
		assert_eq!(&mac[..], &expected[..]);
	}
}
| -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use crate::types::{ErrorCode, ErrorResponse}; 2 | use prost::bytes::Bytes; 3 | use prost::{DecodeError, Message}; 4 | use reqwest::StatusCode; 5 | use std::error::Error; 6 | use std::fmt::{Display, Formatter}; 7 | 8 | /// When there is an error while writing to VSS storage, the response contains a relevant error code. 9 | /// A mapping from a VSS server error codes. Refer to [`ErrorResponse`] docs for more 10 | /// information regarding each error code and corresponding use-cases. 11 | #[derive(Debug)] 12 | pub enum VssError { 13 | /// Please refer to [`ErrorCode::NoSuchKeyException`]. 14 | NoSuchKeyError(String), 15 | 16 | /// Please refer to [`ErrorCode::InvalidRequestException`]. 17 | InvalidRequestError(String), 18 | 19 | /// Please refer to [`ErrorCode::ConflictException`]. 20 | ConflictError(String), 21 | 22 | /// Please refer to [`ErrorCode::AuthException`]. 23 | AuthError(String), 24 | 25 | /// Please refer to [`ErrorCode::InternalServerException`]. 26 | InternalServerError(String), 27 | 28 | /// There is an unknown error, it could be a client-side bug, unrecognized error-code, network error 29 | /// or something else. 
30 | InternalError(String), 31 | } 32 | 33 | impl VssError { 34 | /// Create new instance of `VssError` 35 | pub fn new(status: StatusCode, payload: Bytes) -> VssError { 36 | match ErrorResponse::decode(&payload[..]) { 37 | Ok(error_response) => VssError::from(error_response), 38 | Err(e) => { 39 | let message = format!( 40 | "Unable to decode ErrorResponse from server, HttpStatusCode: {}, DecodeErr: {}", 41 | status, e 42 | ); 43 | VssError::InternalError(message) 44 | }, 45 | } 46 | } 47 | } 48 | 49 | impl Display for VssError { 50 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 51 | match self { 52 | VssError::NoSuchKeyError(message) => { 53 | write!(f, "Requested key does not exist: {}", message) 54 | }, 55 | VssError::InvalidRequestError(message) => { 56 | write!(f, "Request sent to VSS Storage was invalid: {}", message) 57 | }, 58 | VssError::ConflictError(message) => { 59 | write!(f, "Potential version conflict in write operation: {}", message) 60 | }, 61 | VssError::AuthError(message) => { 62 | write!(f, "Authentication or Authorization failure: {}", message) 63 | }, 64 | VssError::InternalServerError(message) => { 65 | write!(f, "InternalServerError: {}", message) 66 | }, 67 | VssError::InternalError(message) => { 68 | write!(f, "InternalError: {}", message) 69 | }, 70 | } 71 | } 72 | } 73 | 74 | impl Error for VssError {} 75 | 76 | impl From for VssError { 77 | fn from(error_response: ErrorResponse) -> Self { 78 | match error_response.error_code() { 79 | ErrorCode::NoSuchKeyException => VssError::NoSuchKeyError(error_response.message), 80 | ErrorCode::InvalidRequestException => { 81 | VssError::InvalidRequestError(error_response.message) 82 | }, 83 | ErrorCode::ConflictException => VssError::ConflictError(error_response.message), 84 | ErrorCode::AuthException => VssError::AuthError(error_response.message), 85 | ErrorCode::InternalServerException => { 86 | VssError::InternalServerError(error_response.message) 87 | }, 88 | _ => 
VssError::InternalError(format!( 89 | "VSS responded with an unknown error code: {}, message: {}", 90 | error_response.error_code, error_response.message 91 | )), 92 | } 93 | } 94 | } 95 | 96 | impl From for VssError { 97 | fn from(err: DecodeError) -> Self { 98 | VssError::InternalError(err.to_string()) 99 | } 100 | } 101 | 102 | impl From for VssError { 103 | fn from(err: reqwest::Error) -> Self { 104 | VssError::InternalError(err.to_string()) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/headers/lnurl_auth_jwt.rs: -------------------------------------------------------------------------------- 1 | use crate::headers::{get_headermap, VssHeaderProvider, VssHeaderProviderError}; 2 | use async_trait::async_trait; 3 | use base64::engine::general_purpose::URL_SAFE_NO_PAD; 4 | use base64::Engine; 5 | use bitcoin::bip32::{ChildNumber, DerivationPath, Xpriv}; 6 | use bitcoin::hashes::hex::FromHex; 7 | use bitcoin::hashes::sha256; 8 | use bitcoin::hashes::{Hash, HashEngine, Hmac, HmacEngine}; 9 | use bitcoin::secp256k1::{Message, Secp256k1, SignOnly}; 10 | use bitcoin::PrivateKey; 11 | use serde::Deserialize; 12 | use std::collections::HashMap; 13 | use std::sync::RwLock; 14 | use std::time::{Duration, SystemTime}; 15 | use url::Url; 16 | 17 | // Derivation index of the hashing private key as defined by LUD-05. 18 | const HASHING_DERIVATION_INDEX: u32 = 0; 19 | // The JWT token will be refreshed by the given amount before its expiry. 20 | const EXPIRY_BUFFER: Duration = Duration::from_secs(60); 21 | // The key of the LNURL k1 query parameter. 22 | const K1_QUERY_PARAM: &str = "k1"; 23 | // The key of the LNURL sig query parameter. 24 | const SIG_QUERY_PARAM: &str = "sig"; 25 | // The key of the LNURL key query parameter. 26 | const KEY_QUERY_PARAM: &str = "key"; 27 | // The authorization header name. 
28 | const AUTHORIZATION: &str = "Authorization"; 29 | 30 | #[derive(Debug, Clone)] 31 | struct JwtToken { 32 | token_str: String, 33 | expiry: Option, 34 | } 35 | 36 | impl JwtToken { 37 | fn is_expired(&self) -> bool { 38 | self.expiry 39 | .and_then(|expiry| { 40 | SystemTime::now() 41 | .checked_add(EXPIRY_BUFFER) 42 | .map(|now_with_buffer| now_with_buffer > expiry) 43 | }) 44 | .unwrap_or(false) 45 | } 46 | } 47 | 48 | /// Provides a JWT token based on LNURL Auth. 49 | pub struct LnurlAuthToJwtProvider { 50 | engine: Secp256k1, 51 | parent_key: Xpriv, 52 | url: String, 53 | default_headers: HashMap, 54 | client: reqwest::Client, 55 | cached_jwt_token: RwLock>, 56 | } 57 | 58 | impl LnurlAuthToJwtProvider { 59 | /// Creates a new JWT provider based on LNURL Auth. 60 | /// 61 | /// The LNURL Auth keys are derived as children from a hardened parent key, 62 | /// following [LUD-05](https://github.com/lnurl/luds/blob/luds/05.md). 63 | /// The hardened parent extended key is given here as an argument, and is suggested to be the 64 | /// `m/138'` derivation from the wallet master key as in the specification. 65 | /// However, users are free to choose a consistent hardened derivation path. 66 | /// 67 | /// The LNURL with the challenge will be retrieved by making a request to the given URL. 68 | /// The JWT token will be returned in response to the signed LNURL request under a token field. 69 | /// The given set of headers will be used for LNURL requests, and will also be returned together 70 | /// with the JWT authorization header for VSS requests. 
71 | pub fn new( 72 | parent_key: Xpriv, url: String, default_headers: HashMap, 73 | ) -> Result { 74 | let engine = Secp256k1::signing_only(); 75 | let default_headermap = get_headermap(&default_headers)?; 76 | let client = reqwest::Client::builder() 77 | .default_headers(default_headermap) 78 | .build() 79 | .map_err(VssHeaderProviderError::from)?; 80 | 81 | Ok(LnurlAuthToJwtProvider { 82 | engine, 83 | parent_key, 84 | url, 85 | default_headers, 86 | client, 87 | cached_jwt_token: RwLock::new(None), 88 | }) 89 | } 90 | 91 | async fn fetch_jwt_token(&self) -> Result { 92 | // Fetch the LNURL. 93 | let lnurl_str = self 94 | .client 95 | .get(&self.url) 96 | .send() 97 | .await 98 | .map_err(VssHeaderProviderError::from)? 99 | .text() 100 | .await 101 | .map_err(VssHeaderProviderError::from)?; 102 | 103 | // Sign the LNURL and perform the request. 104 | let signed_lnurl = sign_lnurl(&self.engine, &self.parent_key, &lnurl_str)?; 105 | let lnurl_auth_response: LnurlAuthResponse = self 106 | .client 107 | .get(&signed_lnurl) 108 | .send() 109 | .await 110 | .map_err(VssHeaderProviderError::from)? 111 | .json() 112 | .await 113 | .map_err(VssHeaderProviderError::from)?; 114 | 115 | let untrusted_token = match lnurl_auth_response { 116 | LnurlAuthResponse { token: Some(token), .. } => token, 117 | LnurlAuthResponse { reason: Some(reason), .. 
} => { 118 | return Err(VssHeaderProviderError::AuthorizationError { 119 | error: format!("LNURL Auth failed, reason is: {}", reason.escape_debug()), 120 | }); 121 | }, 122 | _ => { 123 | return Err(VssHeaderProviderError::InvalidData { 124 | error: "LNURL Auth response did not contain a token nor an error".to_string(), 125 | }); 126 | }, 127 | }; 128 | parse_jwt_token(untrusted_token) 129 | } 130 | 131 | async fn get_jwt_token(&self, force_refresh: bool) -> Result { 132 | let cached_token_str = if force_refresh { 133 | None 134 | } else { 135 | let jwt_token = self.cached_jwt_token.read().unwrap(); 136 | jwt_token.as_ref().filter(|t| !t.is_expired()).map(|t| t.token_str.clone()) 137 | }; 138 | if let Some(token_str) = cached_token_str { 139 | Ok(token_str) 140 | } else { 141 | let jwt_token = self.fetch_jwt_token().await?; 142 | *self.cached_jwt_token.write().unwrap() = Some(jwt_token.clone()); 143 | Ok(jwt_token.token_str) 144 | } 145 | } 146 | } 147 | 148 | #[async_trait] 149 | impl VssHeaderProvider for LnurlAuthToJwtProvider { 150 | async fn get_headers( 151 | &self, _request: &[u8], 152 | ) -> Result, VssHeaderProviderError> { 153 | let jwt_token = self.get_jwt_token(false).await?; 154 | let mut headers = self.default_headers.clone(); 155 | headers.insert(AUTHORIZATION.to_string(), format!("Bearer {}", jwt_token)); 156 | Ok(headers) 157 | } 158 | } 159 | 160 | fn hashing_key( 161 | engine: &Secp256k1, parent_key: &Xpriv, 162 | ) -> Result { 163 | let hashing_child_number = ChildNumber::from_normal_idx(HASHING_DERIVATION_INDEX) 164 | .map_err(VssHeaderProviderError::from)?; 165 | parent_key 166 | .derive_priv(engine, &vec![hashing_child_number]) 167 | .map(|xpriv| xpriv.to_priv()) 168 | .map_err(VssHeaderProviderError::from) 169 | } 170 | 171 | fn linking_key_path( 172 | hashing_key: &PrivateKey, domain_name: &str, 173 | ) -> Result { 174 | let mut engine = HmacEngine::::new(&hashing_key.inner[..]); 175 | engine.input(domain_name.as_bytes()); 176 | let result 
= Hmac::::from_engine(engine).to_byte_array(); 177 | // unwrap safety: We take 4-byte chunks, so TryInto for [u8; 4] never fails. 178 | let children = result 179 | .chunks_exact(4) 180 | .take(4) 181 | .map(|i| u32::from_be_bytes(i.try_into().unwrap())) 182 | .map(ChildNumber::from); 183 | Ok(DerivationPath::from_iter(children)) 184 | } 185 | 186 | fn sign_lnurl( 187 | engine: &Secp256k1, parent_key: &Xpriv, lnurl_str: &str, 188 | ) -> Result { 189 | // Parse k1 parameter to sign. 190 | let invalid_lnurl = || VssHeaderProviderError::InvalidData { 191 | error: format!("invalid lnurl: {}", lnurl_str.escape_debug()), 192 | }; 193 | let mut lnurl = Url::parse(lnurl_str).map_err(|_| invalid_lnurl())?; 194 | let domain = lnurl.domain().ok_or(invalid_lnurl())?; 195 | let k1_str = lnurl 196 | .query_pairs() 197 | .find(|(k, _)| k == K1_QUERY_PARAM) 198 | .ok_or(invalid_lnurl())? 199 | .1 200 | .to_string(); 201 | let k1: [u8; 32] = FromHex::from_hex(&k1_str).map_err(|_| invalid_lnurl())?; 202 | 203 | // Sign k1 parameter with linking private key. 204 | let hashing_private_key = hashing_key(engine, parent_key)?; 205 | let linking_key_path = linking_key_path(&hashing_private_key, domain)?; 206 | let linking_private_key = parent_key 207 | .derive_priv(engine, &linking_key_path) 208 | .map_err(VssHeaderProviderError::from)? 209 | .to_priv(); 210 | let linking_public_key = linking_private_key.public_key(engine); 211 | let message = Message::from_digest_slice(&k1).map_err(|_| { 212 | VssHeaderProviderError::InvalidData { error: format!("invalid k1: {:?}", k1) } 213 | })?; 214 | let sig = engine.sign_ecdsa(&message, &linking_private_key.inner); 215 | 216 | // Compose LNURL with signature and linking public key. 
217 | lnurl 218 | .query_pairs_mut() 219 | .append_pair(SIG_QUERY_PARAM, &sig.serialize_der().to_string()) 220 | .append_pair(KEY_QUERY_PARAM, &linking_public_key.to_string()); 221 | Ok(lnurl.to_string()) 222 | } 223 | 224 | #[derive(Deserialize, Debug, Clone)] 225 | struct LnurlAuthResponse { 226 | reason: Option, 227 | token: Option, 228 | } 229 | 230 | #[derive(Deserialize, Debug, Clone)] 231 | struct ExpiryClaim { 232 | #[serde(rename = "exp")] 233 | expiry_secs: Option, 234 | } 235 | 236 | fn parse_jwt_token(jwt_token: String) -> Result { 237 | let parts: Vec<&str> = jwt_token.split('.').collect(); 238 | let invalid = || VssHeaderProviderError::InvalidData { 239 | error: format!("invalid JWT token: {}", jwt_token.escape_debug()), 240 | }; 241 | if parts.len() != 3 { 242 | return Err(invalid()); 243 | } 244 | let _ = URL_SAFE_NO_PAD.decode(parts[0]).map_err(|_| invalid())?; 245 | let bytes = URL_SAFE_NO_PAD.decode(parts[1]).map_err(|_| invalid())?; 246 | let _ = URL_SAFE_NO_PAD.decode(parts[2]).map_err(|_| invalid())?; 247 | let claim: ExpiryClaim = serde_json::from_slice(&bytes).map_err(|_| invalid())?; 248 | let expiry = 249 | claim.expiry_secs.and_then(|e| SystemTime::UNIX_EPOCH.checked_add(Duration::from_secs(e))); 250 | Ok(JwtToken { token_str: jwt_token, expiry }) 251 | } 252 | 253 | impl From for VssHeaderProviderError { 254 | fn from(e: bitcoin::bip32::Error) -> VssHeaderProviderError { 255 | VssHeaderProviderError::InternalError { error: e.to_string() } 256 | } 257 | } 258 | 259 | impl From for VssHeaderProviderError { 260 | fn from(e: reqwest::Error) -> VssHeaderProviderError { 261 | VssHeaderProviderError::RequestError { error: e.to_string() } 262 | } 263 | } 264 | 265 | #[cfg(test)] 266 | mod test { 267 | use crate::headers::lnurl_auth_jwt::{linking_key_path, sign_lnurl}; 268 | use bitcoin::bip32::Xpriv; 269 | use bitcoin::hashes::hex::FromHex; 270 | use bitcoin::secp256k1::Secp256k1; 271 | use bitcoin::secp256k1::SecretKey; 272 | use 
bitcoin::Network; 273 | use bitcoin::PrivateKey; 274 | use std::str::FromStr; 275 | 276 | #[test] 277 | fn test_linking_key_path() { 278 | // Test vector from: 279 | // https://github.com/lnurl/luds/blob/43cf7754de2033987a7661afc8b4a3998914a536/05.md 280 | let hashing_key = PrivateKey::new( 281 | SecretKey::from_str("7d417a6a5e9a6a4a879aeaba11a11838764c8fa2b959c242d43dea682b3e409b") 282 | .unwrap(), 283 | Network::Testnet, // The network only matters for serialization. 284 | ); 285 | let path = linking_key_path(&hashing_key, "site.com").unwrap(); 286 | let numbers: Vec = path.into_iter().map(|c| u32::from(c.clone())).collect(); 287 | assert_eq!(numbers, vec![1588488367, 2659270754, 38110259, 4136336762]); 288 | } 289 | 290 | #[test] 291 | fn test_sign_lnurl() { 292 | let engine = Secp256k1::signing_only(); 293 | let parent_key_bytes: [u8; 32] = 294 | FromHex::from_hex("abababababababababababababababababababababababababababababababab") 295 | .unwrap(); 296 | let parent_key = Xpriv::new_master(Network::Testnet, &parent_key_bytes).unwrap(); 297 | let signed = sign_lnurl( 298 | &engine, 299 | &parent_key, 300 | "https://example.com/path?tag=login&k1=e2af6254a8df433264fa23f67eb8188635d15ce883e8fc020989d5f82ae6f11e", 301 | ) 302 | .unwrap(); 303 | assert_eq!( 304 | signed, 305 | "https://example.com/path?tag=login&k1=e2af6254a8df433264fa23f67eb8188635d15ce883e8fc020989d5f82ae6f11e&sig=3045022100a75df468de452e618edb8030016eb0894204655c7d93ece1be007fcf36843522022048bc2f00a0a5a30601d274b49cfaf9ef4c76176e5401d0dfb195f5d6ab8ab4c4&key=02d9eb1b467517d685e3b5439082c14bb1a2c9ae672df4d9046d208c193a5846e0", 306 | ); 307 | } 308 | } 309 | -------------------------------------------------------------------------------- /src/headers/mod.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use reqwest::header::HeaderMap; 3 | use std::collections::HashMap; 4 | use std::error::Error; 5 | use std::fmt::Display; 6 | use 
std::fmt::Formatter; 7 | use std::str::FromStr; 8 | 9 | #[cfg(feature = "lnurl-auth")] 10 | mod lnurl_auth_jwt; 11 | 12 | #[cfg(feature = "lnurl-auth")] 13 | pub use lnurl_auth_jwt::LnurlAuthToJwtProvider; 14 | 15 | /// Defines a trait around how headers are provided for each VSS request. 16 | #[async_trait] 17 | pub trait VssHeaderProvider: Send + Sync { 18 | /// Returns the HTTP headers to be used for a VSS request. 19 | /// This method is called on each request, and should likely perform some form of caching. 20 | /// 21 | /// A reference to the serialized request body is given as `request`. 22 | /// It can be used to perform operations such as request signing. 23 | async fn get_headers( 24 | &self, request: &[u8], 25 | ) -> Result, VssHeaderProviderError>; 26 | } 27 | 28 | /// Errors around providing headers for each VSS request. 29 | #[derive(Debug)] 30 | pub enum VssHeaderProviderError { 31 | /// Invalid data was encountered. 32 | InvalidData { 33 | /// The error message. 34 | error: String, 35 | }, 36 | /// An external request failed. 37 | RequestError { 38 | /// The error message. 39 | error: String, 40 | }, 41 | /// Authorization was refused. 42 | AuthorizationError { 43 | /// The error message. 44 | error: String, 45 | }, 46 | /// An application-level error occurred specific to the header provider functionality. 47 | InternalError { 48 | /// The error message. 
49 | error: String, 50 | }, 51 | } 52 | 53 | impl Display for VssHeaderProviderError { 54 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 55 | match self { 56 | Self::InvalidData { error } => { 57 | write!(f, "invalid data: {}", error) 58 | }, 59 | Self::RequestError { error } => { 60 | write!(f, "error performing external request: {}", error) 61 | }, 62 | Self::AuthorizationError { error } => { 63 | write!(f, "authorization was refused: {}", error) 64 | }, 65 | Self::InternalError { error } => { 66 | write!(f, "internal error: {}", error) 67 | }, 68 | } 69 | } 70 | } 71 | 72 | impl Error for VssHeaderProviderError {} 73 | 74 | /// A header provider returning an given, fixed set of headers. 75 | pub struct FixedHeaders { 76 | headers: HashMap, 77 | } 78 | 79 | impl FixedHeaders { 80 | /// Creates a new header provider returning the given, fixed set of headers. 81 | pub fn new(headers: HashMap) -> FixedHeaders { 82 | FixedHeaders { headers } 83 | } 84 | } 85 | 86 | #[async_trait] 87 | impl VssHeaderProvider for FixedHeaders { 88 | async fn get_headers( 89 | &self, _request: &[u8], 90 | ) -> Result, VssHeaderProviderError> { 91 | Ok(self.headers.clone()) 92 | } 93 | } 94 | 95 | pub(crate) fn get_headermap( 96 | headers: &HashMap, 97 | ) -> Result { 98 | let mut headermap = HeaderMap::new(); 99 | for (name, value) in headers { 100 | headermap.insert( 101 | reqwest::header::HeaderName::from_str(&name) 102 | .map_err(|e| VssHeaderProviderError::InvalidData { error: e.to_string() })?, 103 | reqwest::header::HeaderValue::from_str(&value) 104 | .map_err(|e| VssHeaderProviderError::InvalidData { error: e.to_string() })?, 105 | ); 106 | } 107 | Ok(headermap) 108 | } 109 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Client-side library to interact with Versioned Storage Service (VSS). 2 | //! 3 | //! 
VSS is an open-source project designed to offer a server-side cloud storage solution specifically 4 | //! tailored for noncustodial Lightning supporting mobile wallets. Its primary objective is to 5 | //! simplify the development process for Lightning wallets by providing a secure means to store 6 | //! and manage the essential state required for Lightning Network (LN) operations. 7 | //! 8 | //! Learn more [here](https://github.com/lightningdevkit/vss-server/blob/main/README.md). 9 | 10 | #![deny(rustdoc::broken_intra_doc_links)] 11 | #![deny(rustdoc::private_intra_doc_links)] 12 | #![deny(missing_docs)] 13 | 14 | /// Implements a thin-client ([`client::VssClient`]) to access a hosted instance of Versioned Storage Service (VSS). 15 | pub mod client; 16 | 17 | /// Implements the error type ([`error::VssError`]) returned on interacting with [`client::VssClient`] 18 | pub mod error; 19 | 20 | /// Contains request/response types generated from the API definition of VSS. 21 | pub mod types; 22 | 23 | /// Contains utils for encryption, requests-retries etc. 24 | pub mod util; 25 | 26 | // Encryption-Decryption related crate-only helpers. 27 | pub(crate) mod crypto; 28 | 29 | /// A collection of header providers. 30 | pub mod headers; 31 | -------------------------------------------------------------------------------- /src/types.rs: -------------------------------------------------------------------------------- 1 | /// Request payload to be used for `GetObject` API call to server. 2 | #[allow(clippy::derive_partial_eq_without_eq)] 3 | #[derive(Clone, PartialEq, ::prost::Message)] 4 | pub struct GetObjectRequest { 5 | /// `store_id` is a keyspace identifier. 6 | /// Ref: ) 7 | /// All APIs operate within a single `store_id`. 8 | /// It is up to clients to use single or multiple stores for their use-case. 9 | /// This can be used for client-isolation/ rate-limiting / throttling on the server-side. 
10 | /// Authorization and billing can also be performed at the `store_id` level. 11 | #[prost(string, tag = "1")] 12 | pub store_id: ::prost::alloc::string::String, 13 | /// The key of the value to be fetched. 14 | /// 15 | /// If the specified `key` does not exist, returns `ErrorCode.NO_SUCH_KEY_EXCEPTION` in the 16 | /// the `ErrorResponse`. 17 | /// 18 | /// Consistency Guarantee: 19 | /// Get(read) operations against a `key` are consistent reads and will reflect all previous writes, 20 | /// since Put/Write provides read-after-write and read-after-update consistency guarantees. 21 | /// 22 | /// Read Isolation: 23 | /// Get/Read operations against a `key` are ensured to have read-committed isolation. 24 | /// Ref: )#Read_committed 25 | #[prost(string, tag = "2")] 26 | pub key: ::prost::alloc::string::String, 27 | } 28 | /// Server response for `GetObject` API. 29 | #[allow(clippy::derive_partial_eq_without_eq)] 30 | #[derive(Clone, PartialEq, ::prost::Message)] 31 | pub struct GetObjectResponse { 32 | /// Fetched `value` and `version` along with the corresponding `key` in the request. 33 | #[prost(message, optional, tag = "2")] 34 | pub value: ::core::option::Option, 35 | } 36 | /// Request payload to be used for `PutObject` API call to server. 37 | #[allow(clippy::derive_partial_eq_without_eq)] 38 | #[derive(Clone, PartialEq, ::prost::Message)] 39 | pub struct PutObjectRequest { 40 | /// `store_id` is a keyspace identifier. 41 | /// Ref: ) 42 | /// All APIs operate within a single `store_id`. 43 | /// It is up to clients to use single or multiple stores for their use-case. 44 | /// This can be used for client-isolation/ rate-limiting / throttling on the server-side. 45 | /// Authorization and billing can also be performed at the `store_id` level. 46 | #[prost(string, tag = "1")] 47 | pub store_id: ::prost::alloc::string::String, 48 | /// `global_version` is a sequence-number/version of the whole store. 
This can be used for versioning 49 | /// and ensures that multiple updates in case of multiple devices can only be done linearly, even 50 | /// if those updates did not directly conflict with each other based on keys/`transaction_items`. 51 | /// 52 | /// If present, the write will only succeed if the current server-side `global_version` against 53 | /// the `store_id` is same as in the request. 54 | /// Clients are expected to store (client-side) the global version against `store_id`. 55 | /// The request must contain their client-side value of `global_version` if global versioning and 56 | /// conflict detection is desired. 57 | /// 58 | /// For the first write of the store, global version should be '0'. If the write succeeds, clients 59 | /// must increment their global version (client-side) by 1. 60 | /// The server increments `global_version` (server-side) for every successful write, hence this 61 | /// client-side increment is required to ensure matching versions. This updated global version 62 | /// should be used in subsequent `PutObjectRequest`s for the store. 63 | /// 64 | /// Requests with a conflicting version will fail with `CONFLICT_EXCEPTION` as ErrorCode. 65 | #[prost(int64, optional, tag = "2")] 66 | pub global_version: ::core::option::Option, 67 | /// Items to be written as a result of this `PutObjectRequest`. 68 | /// 69 | /// In an item, each `key` is supplied with its corresponding `value` and `version`. 70 | /// Clients can choose to encrypt the keys client-side in order to obfuscate their usage patterns. 71 | /// If the write is successful, the previous `value` corresponding to the `key` will be overwritten. 72 | /// 73 | /// Multiple items in `transaction_items` and `delete_items` of a single `PutObjectRequest` are written in 74 | /// a database-transaction in an all-or-nothing fashion. 75 | /// All Items in a single `PutObjectRequest` must have distinct keys. 
76 | /// 77 | /// Key-level versioning (Conditional Write): 78 | /// Clients are expected to store a `version` against every `key`. 79 | /// The write will succeed if the current DB version against the `key` is the same as in the request. 80 | /// When initiating a `PutObjectRequest`, the request should contain their client-side `version` 81 | /// for that key-value. 82 | /// 83 | /// For the first write of any `key`, the `version` should be '0'. If the write succeeds, the client 84 | /// must increment their corresponding key versions (client-side) by 1. 85 | /// The server increments key versions (server-side) for every successful write, hence this 86 | /// client-side increment is required to ensure matching versions. These updated key versions should 87 | /// be used in subsequent `PutObjectRequest`s for the keys. 88 | /// 89 | /// Requests with a conflicting/mismatched version will fail with `CONFLICT_EXCEPTION` as ErrorCode 90 | /// for conditional writes. 91 | /// 92 | /// Skipping key-level versioning (Non-conditional Write): 93 | /// If you wish to skip key-level version checks, set the `version` against the `key` to '-1'. 94 | /// This will perform a non-conditional write query, after which the `version` against the `key` 95 | /// is reset to '1'. Hence, the next `PutObjectRequest` for the `key` can be either 96 | /// a non-conditional write or a conditional write with `version` set to `1`. 97 | /// 98 | /// Considerations for transactions: 99 | /// Transaction writes of multiple items have a performance overhead, hence it is recommended to use 100 | /// them only if required by the client application to ensure logic/code correctness. 101 | /// That is, `transaction_items` are not a substitute for batch-write of multiple unrelated items. 102 | /// When a write of multiple unrelated items is desired, it is recommended to use separate 103 | /// `PutObjectRequest`s. 
104 | /// 105 | /// Consistency guarantee: 106 | /// All `PutObjectRequest`s are strongly consistent i.e. they provide read-after-write and 107 | /// read-after-update consistency guarantees. 108 | #[prost(message, repeated, tag = "3")] 109 | pub transaction_items: ::prost::alloc::vec::Vec, 110 | /// Items to be deleted as a result of this `PutObjectRequest`. 111 | /// 112 | /// Each item in the `delete_items` field consists of a `key` and its corresponding `version`. 113 | /// 114 | /// Key-Level Versioning (Conditional Delete): 115 | /// The `version` is used to perform a version check before deleting the item. 116 | /// The delete will only succeed if the current database version against the `key` is the same as 117 | /// the `version` specified in the request. 118 | /// 119 | /// Skipping key-level versioning (Non-conditional Delete): 120 | /// If you wish to skip key-level version checks, set the `version` against the `key` to '-1'. 121 | /// This will perform a non-conditional delete query. 122 | /// 123 | /// Fails with `CONFLICT_EXCEPTION` as the ErrorCode if: 124 | /// * The requested item does not exist. 125 | /// * The requested item does exist but there is a version-number mismatch (in conditional delete) 126 | /// with the one in the database. 127 | /// 128 | /// Multiple items in the `delete_items` field, along with the `transaction_items`, are written in a 129 | /// database transaction in an all-or-nothing fashion. 130 | /// 131 | /// All items within a single `PutObjectRequest` must have distinct keys. 132 | #[prost(message, repeated, tag = "4")] 133 | pub delete_items: ::prost::alloc::vec::Vec, 134 | } 135 | /// Server response for `PutObject` API. 136 | #[allow(clippy::derive_partial_eq_without_eq)] 137 | #[derive(Clone, PartialEq, ::prost::Message)] 138 | pub struct PutObjectResponse {} 139 | /// Request payload to be used for `DeleteObject` API call to server. 
140 | #[allow(clippy::derive_partial_eq_without_eq)] 141 | #[derive(Clone, PartialEq, ::prost::Message)] 142 | pub struct DeleteObjectRequest { 143 | /// `store_id` is a keyspace identifier. 144 | /// Ref: ) 145 | /// All APIs operate within a single `store_id`. 146 | /// It is up to clients to use single or multiple stores for their use-case. 147 | /// This can be used for client-isolation/ rate-limiting / throttling on the server-side. 148 | /// Authorization and billing can also be performed at the `store_id` level. 149 | #[prost(string, tag = "1")] 150 | pub store_id: ::prost::alloc::string::String, 151 | /// Item to be deleted as a result of this `DeleteObjectRequest`. 152 | /// 153 | /// An item consists of a `key` and its corresponding `version`. 154 | /// 155 | /// Key-level Versioning (Conditional Delete): 156 | /// The item is only deleted if the current database version against the `key` is the same as 157 | /// the `version` specified in the request. 158 | /// 159 | /// Skipping key-level versioning (Non-conditional Delete): 160 | /// If you wish to skip key-level version checks, set the `version` against the `key` to '-1'. 161 | /// This will perform a non-conditional delete query. 162 | /// 163 | /// This operation is idempotent, that is, multiple delete calls for the same item will not fail. 164 | /// 165 | /// If the requested item does not exist, this operation will not fail. 166 | /// If you wish to perform stricter checks while deleting an item, consider using `PutObject` API. 167 | #[prost(message, optional, tag = "2")] 168 | pub key_value: ::core::option::Option, 169 | } 170 | /// Server response for `DeleteObject` API. 171 | #[allow(clippy::derive_partial_eq_without_eq)] 172 | #[derive(Clone, PartialEq, ::prost::Message)] 173 | pub struct DeleteObjectResponse {} 174 | /// Request payload to be used for `ListKeyVersions` API call to server. 
175 | #[allow(clippy::derive_partial_eq_without_eq)] 176 | #[derive(Clone, PartialEq, ::prost::Message)] 177 | pub struct ListKeyVersionsRequest { 178 | /// `store_id` is a keyspace identifier. 179 | /// Ref: ) 180 | /// All APIs operate within a single `store_id`. 181 | /// It is up to clients to use single or multiple stores for their use-case. 182 | /// This can be used for client-isolation/ rate-limiting / throttling on the server-side. 183 | /// Authorization and billing can also be performed at the `store_id` level. 184 | #[prost(string, tag = "1")] 185 | pub store_id: ::prost::alloc::string::String, 186 | /// A `key_prefix` is a string of characters at the beginning of the key. Prefixes can be used as 187 | /// a way to organize key-values in a similar way to directories. 188 | /// 189 | /// If `key_prefix` is specified, the response results will be limited to those keys that begin with 190 | /// the specified prefix. 191 | /// 192 | /// If no `key_prefix` is specified or it is empty (""), all the keys are eligible to be returned in 193 | /// the response. 194 | #[prost(string, optional, tag = "2")] 195 | pub key_prefix: ::core::option::Option<::prost::alloc::string::String>, 196 | /// `page_size` is used by clients to specify the maximum number of results that can be returned by 197 | /// the server. 198 | /// The server may further constrain the maximum number of results returned in a single page. 199 | /// If the `page_size` is 0 or not set, the server will decide the number of results to be returned. 200 | #[prost(int32, optional, tag = "3")] 201 | pub page_size: ::core::option::Option, 202 | /// `page_token` is a pagination token. 203 | /// 204 | /// To query for the first page of `ListKeyVersions`, `page_token` must not be specified. 205 | /// 206 | /// For subsequent pages, use the value that was returned as `next_page_token` in the previous 207 | /// page's `ListKeyVersionsResponse`. 
208 | #[prost(string, optional, tag = "4")] 209 | pub page_token: ::core::option::Option<::prost::alloc::string::String>, 210 | } 211 | /// Server response for `ListKeyVersions` API. 212 | #[allow(clippy::derive_partial_eq_without_eq)] 213 | #[derive(Clone, PartialEq, ::prost::Message)] 214 | pub struct ListKeyVersionsResponse { 215 | /// Fetched keys and versions. 216 | /// Even though this API reuses the `KeyValue` struct, the `value` sub-field will not be set by the server. 217 | #[prost(message, repeated, tag = "1")] 218 | pub key_versions: ::prost::alloc::vec::Vec, 219 | /// `next_page_token` is a pagination token, used to retrieve the next page of results. 220 | /// Use this value to query for next-page of paginated `ListKeyVersions` operation, by specifying 221 | /// this value as the `page_token` in the next request. 222 | /// 223 | /// If `next_page_token` is empty (""), then the "last page" of results has been processed and 224 | /// there is no more data to be retrieved. 225 | /// 226 | /// If `next_page_token` is not empty, it does not necessarily mean that there is more data in the 227 | /// result set. The only way to know when you have reached the end of the result set is when 228 | /// `next_page_token` is empty. 229 | /// 230 | /// Caution: Clients must not assume a specific number of key_versions to be present in a page for 231 | /// paginated response. 232 | #[prost(string, optional, tag = "2")] 233 | pub next_page_token: ::core::option::Option<::prost::alloc::string::String>, 234 | /// `global_version` is a sequence-number/version of the whole store. 235 | /// 236 | /// `global_version` is only returned in response for the first page of the `ListKeyVersionsResponse` 237 | /// and is guaranteed to be read before reading any key-versions. 238 | /// 239 | /// In case of refreshing the complete key-version view on the client-side, correct usage for 240 | /// the returned `global_version` is as following: 241 | /// 1. 
Read `global_version` from the first page of paginated response and save it as local variable. 242 | /// 2. Update all the `key_versions` on client-side from all the pages of paginated response. 243 | /// 3. Update `global_version` on client_side from the local variable saved in step-1. 244 | /// This ensures that on client-side, all current `key_versions` were stored at `global_version` or later. 245 | /// This guarantee is helpful for ensuring the versioning correctness if using the `global_version` 246 | /// in `PutObject` API and can help avoid the race conditions related to it. 247 | #[prost(int64, optional, tag = "3")] 248 | pub global_version: ::core::option::Option, 249 | } 250 | /// When HttpStatusCode is not ok (200), the response `content` contains a serialized `ErrorResponse` 251 | /// with the relevant `ErrorCode` and `message` 252 | #[allow(clippy::derive_partial_eq_without_eq)] 253 | #[derive(Clone, PartialEq, ::prost::Message)] 254 | pub struct ErrorResponse { 255 | /// The error code uniquely identifying an error condition. 256 | /// It is meant to be read and understood programmatically by code that detects/handles errors by 257 | /// type. 258 | #[prost(enumeration = "ErrorCode", tag = "1")] 259 | pub error_code: i32, 260 | /// The error message containing a generic description of the error condition in English. 261 | /// It is intended for a human audience only and should not be parsed to extract any information 262 | /// programmatically. Client-side code may use it for logging only. 263 | #[prost(string, tag = "2")] 264 | pub message: ::prost::alloc::string::String, 265 | } 266 | /// Represents a key-value pair to be stored or retrieved. 267 | #[allow(clippy::derive_partial_eq_without_eq)] 268 | #[derive(Clone, PartialEq, ::prost::Message)] 269 | pub struct KeyValue { 270 | /// Key against which the value is stored. 
271 | #[prost(string, tag = "1")] 272 | pub key: ::prost::alloc::string::String, 273 | /// Version field is used for key-level versioning. 274 | /// For first write of key, `version` should be '0'. If the write succeeds, clients must increment 275 | /// their corresponding key version (client-side) by 1. 276 | /// The server increments key version (server-side) for every successful write, hence this 277 | /// client-side increment is required to ensure matching versions. These updated key versions should 278 | /// be used in subsequent `PutObjectRequest`s for the keys. 279 | #[prost(int64, tag = "2")] 280 | pub version: i64, 281 | /// Object value in bytes which is stored (in put) and fetched (in get). 282 | /// Clients must encrypt the secret contents of this blob client-side before sending it over the 283 | /// wire to the server in order to preserve privacy and security. 284 | /// Clients may use a `Storable` object, serialize it and set it here. 285 | #[prost(bytes = "vec", tag = "3")] 286 | pub value: ::prost::alloc::vec::Vec, 287 | } 288 | /// Represents a storable object that can be serialized and stored as `value` in `PutObjectRequest`. 289 | /// Only provided as a helper object for ease of use by clients. 290 | /// Clients MUST encrypt the `PlaintextBlob` before using it as `data` in `Storable`. 291 | /// The server does not use or read anything from `Storable`, Clients may use its fields as 292 | /// required. 293 | #[allow(clippy::derive_partial_eq_without_eq)] 294 | #[derive(Clone, PartialEq, ::prost::Message)] 295 | pub struct Storable { 296 | /// Represents an encrypted and serialized `PlaintextBlob`. MUST encrypt the whole `PlaintextBlob` 297 | /// using client-side encryption before setting here. 
298 | #[prost(bytes = "vec", tag = "1")] 299 | pub data: ::prost::alloc::vec::Vec, 300 | /// Represents encryption related metadata 301 | #[prost(message, optional, tag = "2")] 302 | pub encryption_metadata: ::core::option::Option, 303 | } 304 | /// Represents encryption related metadata 305 | #[allow(clippy::derive_partial_eq_without_eq)] 306 | #[derive(Clone, PartialEq, ::prost::Message)] 307 | pub struct EncryptionMetadata { 308 | /// The encryption algorithm used for encrypting the `PlaintextBlob`. 309 | #[prost(string, tag = "1")] 310 | pub cipher_format: ::prost::alloc::string::String, 311 | /// The nonce used for encryption. Nonce is a random or unique value used to ensure that the same 312 | /// plaintext results in different ciphertexts every time it is encrypted. 313 | #[prost(bytes = "vec", tag = "2")] 314 | pub nonce: ::prost::alloc::vec::Vec, 315 | /// The authentication tag used for encryption. It provides integrity and authenticity assurance 316 | /// for the encrypted data. 317 | #[prost(bytes = "vec", tag = "3")] 318 | pub tag: ::prost::alloc::vec::Vec, 319 | } 320 | /// Represents a data blob, which is encrypted, serialized and later used in `Storable.data`. 321 | /// Since the whole `Storable.data` is client-side encrypted, the server cannot understand this. 322 | #[allow(clippy::derive_partial_eq_without_eq)] 323 | #[derive(Clone, PartialEq, ::prost::Message)] 324 | pub struct PlaintextBlob { 325 | /// The unencrypted value. 326 | #[prost(bytes = "vec", tag = "1")] 327 | pub value: ::prost::alloc::vec::Vec, 328 | /// The version of the value. Can be used by client to verify version integrity. 329 | #[prost(int64, tag = "2")] 330 | pub version: i64, 331 | } 332 | /// ErrorCodes to be used in `ErrorResponse` 333 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] 334 | #[repr(i32)] 335 | pub enum ErrorCode { 336 | /// Default protobuf Enum value. Will not be used as `ErrorCode` by server. 
337 | Unknown = 0, 338 | /// Used when the request contains mismatched version (either key or global) 339 | /// in `PutObjectRequest`. For more info refer `PutObjectRequest`. 340 | ConflictException = 1, 341 | /// Used in the following cases: 342 | /// - The request was missing a required argument. 343 | /// - The specified argument was invalid, incomplete or in the wrong format. 344 | /// - The request body of api cannot be deserialized into corresponding protobuf object. 345 | InvalidRequestException = 2, 346 | /// Used when an internal server error occurred, client is probably at no fault and can safely retry 347 | /// this error with exponential backoff. 348 | InternalServerException = 3, 349 | /// Used when the specified `key` in a `GetObjectRequest` does not exist. 350 | NoSuchKeyException = 4, 351 | /// Used when authentication fails or in case of an unauthorized request. 352 | AuthException = 5, 353 | } 354 | impl ErrorCode { 355 | /// String value of the enum field names used in the ProtoBuf definition. 356 | /// 357 | /// The values are not transformed in any way and thus are considered stable 358 | /// (if the ProtoBuf definition does not change) and safe for programmatic use. 359 | pub fn as_str_name(&self) -> &'static str { 360 | match self { 361 | ErrorCode::Unknown => "UNKNOWN", 362 | ErrorCode::ConflictException => "CONFLICT_EXCEPTION", 363 | ErrorCode::InvalidRequestException => "INVALID_REQUEST_EXCEPTION", 364 | ErrorCode::InternalServerException => "INTERNAL_SERVER_EXCEPTION", 365 | ErrorCode::NoSuchKeyException => "NO_SUCH_KEY_EXCEPTION", 366 | ErrorCode::AuthException => "AUTH_EXCEPTION", 367 | } 368 | } 369 | /// Creates an enum from field names used in the ProtoBuf definition. 
	pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
		match value {
			"UNKNOWN" => Some(Self::Unknown),
			"CONFLICT_EXCEPTION" => Some(Self::ConflictException),
			"INVALID_REQUEST_EXCEPTION" => Some(Self::InvalidRequestException),
			"INTERNAL_SERVER_EXCEPTION" => Some(Self::InternalServerException),
			"NO_SUCH_KEY_EXCEPTION" => Some(Self::NoSuchKeyException),
			"AUTH_EXCEPTION" => Some(Self::AuthException),
			_ => None,
		}
	}
}
--------------------------------------------------------------------------------
/src/util/key_obfuscator.rs:
--------------------------------------------------------------------------------
use std::io::{Error, ErrorKind};

use base64::prelude::BASE64_STANDARD_NO_PAD;
use base64::Engine;
use bitcoin_hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine};

use crate::crypto::chacha20poly1305::ChaCha20Poly1305;

/// [`KeyObfuscator`] is a utility to obfuscate and deobfuscate storage
/// keys to be used for VSS operations.
///
/// It provides client-side deterministic encryption of given keys using ChaCha20-Poly1305.
pub struct KeyObfuscator {
	// Key used to encrypt/decrypt storage keys; derived from the master key in `new`.
	obfuscation_key: [u8; 32],
	// Key used to derive deterministic ("synthetic") nonces via HMAC; derived in `new`.
	hashing_key: [u8; 32],
}

impl KeyObfuscator {
	/// Constructs a new instance.
	///
	/// Both the obfuscation and the nonce-hashing keys are derived deterministically
	/// from `obfuscation_master_key`, so the same master key always yields the same
	/// obfuscated output for a given input key.
	pub fn new(obfuscation_master_key: [u8; 32]) -> KeyObfuscator {
		let (obfuscation_key, hashing_key) =
			Self::derive_obfuscation_and_hashing_keys(&obfuscation_master_key);
		Self { obfuscation_key, hashing_key }
	}
}

/// Poly1305 authentication tag length, in bytes.
const TAG_LENGTH: usize = 16;
/// ChaCha20 (IETF) nonce length, in bytes.
const NONCE_LENGTH: usize = 12;

impl KeyObfuscator {
	/// Obfuscates the given key.
	pub fn obfuscate(&self, key: &str) -> String {
		let key_bytes = key.as_bytes();
		// Final blob layout: ciphertext || tag || wrapped_nonce || nonce_tag, base64-encoded.
		let mut ciphertext =
			Vec::with_capacity(key_bytes.len() + TAG_LENGTH + NONCE_LENGTH + TAG_LENGTH);
		ciphertext.extend_from_slice(&key_bytes);

		// Encrypt key in-place using a synthetic nonce.
		// The nonce is derived deterministically from the key itself, which is what makes
		// obfuscation deterministic (same key + same master key => same output).
		let (mut nonce, tag) = self.encrypt(&mut ciphertext, key.as_bytes());

		// Wrap the synthetic nonce to store along-side key.
		// Note: the wrapping step derives its own nonce from the ciphertext (pre-tag).
		let (_, nonce_tag) = self.encrypt(&mut nonce, &ciphertext);

		debug_assert_eq!(tag.len(), TAG_LENGTH);
		ciphertext.extend_from_slice(&tag);
		debug_assert_eq!(nonce.len(), NONCE_LENGTH);
		ciphertext.extend_from_slice(&nonce);
		debug_assert_eq!(nonce_tag.len(), TAG_LENGTH);
		ciphertext.extend_from_slice(&nonce_tag);
		BASE64_STANDARD_NO_PAD.encode(ciphertext)
	}

	/// Deobfuscates the given obfuscated_key.
	///
	/// Returns an [`ErrorKind::InvalidData`] error if the input is not valid base64, is
	/// too short to hold the fixed-size trailer, fails tag authentication at either
	/// decryption step, or does not decrypt to valid UTF-8.
	pub fn deobfuscate(&self, obfuscated_key: &str) -> Result<String, Error> {
		let obfuscated_key_bytes = BASE64_STANDARD_NO_PAD.decode(obfuscated_key).map_err(|e| {
			let msg = format!(
				"Failed to decode base64 while deobfuscating key: {}, Error: {}",
				obfuscated_key, e
			);
			Error::new(ErrorKind::InvalidData, msg)
		})?;

		// The trailer (tag || wrapped_nonce || nonce_tag) is fixed-size; the ciphertext
		// portion itself may be empty (empty input key).
		if obfuscated_key_bytes.len() < TAG_LENGTH + NONCE_LENGTH + TAG_LENGTH {
			let msg = format!(
				"Failed to deobfuscate, obfuscated_key was of invalid length. \
				Obfuscated key should at least have {} bytes, found: {}. Key: {}.",
				(TAG_LENGTH + NONCE_LENGTH + TAG_LENGTH),
				obfuscated_key_bytes.len(),
				obfuscated_key
			);
			return Err(Error::new(ErrorKind::InvalidData, msg));
		}

		// Split obfuscated_key into ciphertext, tag(for ciphertext), wrapped_nonce,
		// tag(for wrapped_nonce) — the inverse of the layout written by `obfuscate`.
		let (ciphertext, remaining) = obfuscated_key_bytes
			.split_at(obfuscated_key_bytes.len() - TAG_LENGTH - NONCE_LENGTH - TAG_LENGTH);
		let (tag, remaining) = remaining.split_at(TAG_LENGTH);
		let (wrapped_nonce_bytes, wrapped_nonce_tag) = remaining.split_at(NONCE_LENGTH);
		debug_assert_eq!(wrapped_nonce_tag.len(), TAG_LENGTH);

		// Unwrap wrapped_nonce to get nonce (decrypted in place).
		let mut wrapped_nonce = [0u8; NONCE_LENGTH];
		wrapped_nonce.clone_from_slice(&wrapped_nonce_bytes);
		self.decrypt(&mut wrapped_nonce, ciphertext, wrapped_nonce_tag).map_err(|_| {
			let msg = format!(
				"Failed to decrypt wrapped nonce, for key: {}, Invalid Tag.",
				obfuscated_key
			);
			Error::new(ErrorKind::InvalidData, msg)
		})?;

		// Decrypt ciphertext using nonce.
		let mut cipher = ChaCha20Poly1305::new(&self.obfuscation_key, &wrapped_nonce, &[]);
		let mut ciphertext = ciphertext.to_vec();
		cipher.decrypt_inplace(&mut ciphertext, tag).map_err(|_| {
			let msg = format!("Failed to decrypt key: {}, Invalid Tag.", obfuscated_key);
			Error::new(ErrorKind::InvalidData, msg)
		})?;

		let original_key = String::from_utf8(ciphertext).map_err(|e| {
			let msg = format!(
				"Input was not valid utf8 while deobfuscating key: {}, Error: {}",
				obfuscated_key, e
			);
			Error::new(ErrorKind::InvalidData, msg)
		})?;
		Ok(original_key)
	}

	/// Encrypts the given plaintext in-place using a HMAC generated nonce.
	fn encrypt(
		&self, mut plaintext: &mut [u8], initial_nonce_material: &[u8],
	) -> ([u8; 12], [u8; 16]) {
		// Derive a deterministic nonce from the given material, AEAD-encrypt `plaintext`
		// in place, and return both the nonce and the Poly1305 tag.
		let nonce = self.generate_synthetic_nonce(initial_nonce_material);
		let mut cipher = ChaCha20Poly1305::new(&self.obfuscation_key, &nonce, &[]);
		let mut tag = [0u8; TAG_LENGTH];
		cipher.encrypt_inplace(&mut plaintext, &mut tag);
		(nonce, tag)
	}

	/// Decrypts the given ciphertext in-place using a HMAC generated nonce.
	///
	/// Returns `Err(())` if the tag does not authenticate.
	fn decrypt(
		&self, mut ciphertext: &mut [u8], initial_nonce_material: &[u8], tag: &[u8],
	) -> Result<(), ()> {
		// The nonce is re-derived from the same material used during encryption, so
		// callers must pass identical `initial_nonce_material` on both sides.
		let nonce = self.generate_synthetic_nonce(initial_nonce_material);
		let mut cipher = ChaCha20Poly1305::new(&self.obfuscation_key, &nonce, &[]);
		cipher.decrypt_inplace(&mut ciphertext, tag)
	}

	/// Generate a HMAC based nonce using provided `initial_nonce_material`.
	fn generate_synthetic_nonce(&self, initial_nonce_material: &[u8]) -> [u8; 12] {
		let hmac = Self::hkdf(&self.hashing_key, initial_nonce_material);
		let mut nonce = [0u8; NONCE_LENGTH];
		// 12-byte nonce: 4 leading zero bytes followed by the first 8 bytes of the HMAC.
		nonce[4..].copy_from_slice(&hmac[..8]);
		nonce
	}

	/// Derives the obfuscation and hashing keys from the master key.
	fn derive_obfuscation_and_hashing_keys(
		obfuscation_master_key: &[u8; 32],
	) -> ([u8; 32], [u8; 32]) {
		// Two-step derivation: master key -> PRK, then PRK -> (k1, k2).
		// k2's derivation mixes in k1, domain-separating the two keys.
		let prk = Self::hkdf(obfuscation_master_key, "pseudo_random_key".as_bytes());
		let k1 = Self::hkdf(&prk, "obfuscation_key".as_bytes());
		let k2 = Self::hkdf(&prk, &[&k1[..], "hashing_key".as_bytes()].concat());
		(k1, k2)
	}

	/// Single HMAC-SHA256 invocation keyed with `salt` over `initial_key_material`.
	///
	/// NOTE(review): despite the name this is a one-shot extract step, not a full
	/// RFC 5869 HKDF (no expand phase) — confirm the naming is intentional.
	fn hkdf(initial_key_material: &[u8], salt: &[u8]) -> [u8; 32] {
		let mut engine = HmacEngine::<sha256::Hash>::new(salt);
		engine.input(initial_key_material);
		Hmac::from_engine(engine).to_byte_array()
	}
}

#[cfg(test)]
mod tests {
	use crate::util::key_obfuscator::KeyObfuscator;

	#[test]
	fn obfuscate_deobfuscate_deterministic() {
		let obfuscation_master_key = [42u8; 32];
		let key_obfuscator = KeyObfuscator::new(obfuscation_master_key);
		let expected_key = "a_semi_secret_key";
		let obfuscated_key = key_obfuscator.obfuscate(expected_key);

		let actual_key = key_obfuscator.deobfuscate(obfuscated_key.as_str()).unwrap();
		assert_eq!(actual_key, expected_key);
		// Pinned output guards against accidental changes to the derivation or
		// encryption scheme: obfuscation is fully deterministic by design.
		assert_eq!(
			obfuscated_key,
			"cMoet5WTvl0nYds+VW7JPCtXUq24DtMG2dR9apAi/T5jy8eNIEyDrUAJBS4geeUuX+XGXPqlizIByOip2g"
		);
	}

	use proptest::prelude::*;

	proptest!
	{
		// Round-trip property: deobfuscate(obfuscate(k)) == k for arbitrary keys drawn
		// from the character class below and arbitrary 32-byte master keys.
		#[test]
		fn obfuscate_deobfuscate_proptest(expected_key in "[a-zA-Z0-9_!@#,;:%\\s\\*\\$\\^&\\(\\)\\[\\]\\{\\}\\.]*", obfuscation_master_key in any::<[u8; 32]>()) {
			let key_obfuscator = KeyObfuscator::new(obfuscation_master_key);
			let obfuscated_key = key_obfuscator.obfuscate(&expected_key);
			let actual_key = key_obfuscator.deobfuscate(obfuscated_key.as_str()).unwrap();
			assert_eq!(actual_key, expected_key);
		}
	}
}
--------------------------------------------------------------------------------
/src/util/mod.rs:
--------------------------------------------------------------------------------
/// Contains [`StorableBuilder`] utility.
///
/// [`StorableBuilder`]: storable_builder::StorableBuilder
pub mod storable_builder;

/// Contains retry utilities.
pub mod retry;

/// Contains [`KeyObfuscator`] utility.
///
/// [`KeyObfuscator`]: key_obfuscator::KeyObfuscator
pub mod key_obfuscator;
--------------------------------------------------------------------------------
/src/util/retry.rs:
--------------------------------------------------------------------------------
use rand::Rng;
use std::error::Error;
use std::future::Future;
use std::marker::PhantomData;
use std::time::Duration;

/// A function that performs and retries the given operation according to a retry policy.
///
/// **Caution**: A retry policy without the number of attempts capped by [`MaxAttemptsRetryPolicy`]
/// decorator will result in infinite retries.
///
/// **Example**
/// ```rust
/// # use std::time::Duration;
/// # use vss_client::error::VssError;
/// # use vss_client::util::retry::{ExponentialBackoffRetryPolicy, retry, RetryPolicy};
/// #
/// # async fn operation() -> Result<i32, VssError> {
/// #     tokio::time::sleep(Duration::from_millis(10)).await;
/// #     Ok(42)
/// # }
/// #
/// let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(100))
///     .with_max_attempts(5)
///     .with_max_total_delay(Duration::from_secs(2))
///     .with_max_jitter(Duration::from_millis(30))
///     .skip_retry_on_error(|e| matches!(e, VssError::InvalidRequestError(..)));
///
/// let result = retry(operation, &retry_policy);
///```
///
/// To use a retry policy as a member in a [`Send`] & [`Sync`] safe struct which needs to have known
/// size at compile time, we can specify its concrete type as follows:
/// ```
/// # use std::time::Duration;
/// # use vss_client::error::VssError;
/// # use vss_client::util::retry::{ExponentialBackoffRetryPolicy, FilteredRetryPolicy, retry, RetryPolicy};
///
/// type VssRetryPolicy = FilteredRetryPolicy<ExponentialBackoffRetryPolicy<VssError>, Box<dyn Fn(&VssError) -> bool>>;
///
/// struct SomeStruct {
///     retry_policy: VssRetryPolicy,
/// }
///
/// impl SomeStruct {
///     fn new() -> Self {
///         let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(100))
///             .skip_retry_on_error(Box::new(|e: &VssError| { matches!( e, VssError::NoSuchKeyError(..)) }) as _);
///         Self { retry_policy }
///     }
/// }
/// ```
pub async fn retry<R, F, Fut, T, E>(mut operation: F, retry_policy: &R) -> Result<T, E>
where
	R: RetryPolicy<E = E>,
	F: FnMut() -> Fut,
	Fut: Future<Output = Result<T, E>>,
	E: Error,
{
	let mut attempts_made = 0;
	let mut accumulated_delay = Duration::ZERO;
	loop {
		match operation().await {
			Ok(result) => return Ok(result),
			Err(err) => {
				// The just-failed call counts as an attempt before the policy is consulted,
				// so `attempts_made` is >= 1 whenever `next_delay` is invoked.
				attempts_made += 1;
				// `None` from the policy means "give up"; the latest error is surfaced.
				if let Some(delay) = retry_policy.next_delay(&RetryContext {
					attempts_made,
					accumulated_delay,
					error: &err,
				}) {
					tokio::time::sleep(delay).await;
					// Only artificial sleep time is accumulated, not the time spent
					// executing the operation itself.
					accumulated_delay += delay;
				} else {
					return Err(err);
				}
			},
		}
	}
}

/// Provides the logic for how and when to perform retries.
pub trait RetryPolicy: Sized {
	/// The error type returned by the `operation` in `retry`.
	type E: Error;

	/// Returns the duration to wait before trying the next attempt.
	/// `context` represents the context of a retry operation.
	///
	/// If `None` is returned then no further retry attempt is made.
	fn next_delay(&self, context: &RetryContext<Self::E>) -> Option<Duration>;

	/// Returns a new `RetryPolicy` that respects the given maximum attempts.
	fn with_max_attempts(self, max_attempts: u32) -> MaxAttemptsRetryPolicy<Self> {
		MaxAttemptsRetryPolicy { inner_policy: self, max_attempts }
	}

	/// Returns a new `RetryPolicy` that respects the given total delay.
	fn with_max_total_delay(self, max_total_delay: Duration) -> MaxTotalDelayRetryPolicy<Self> {
		MaxTotalDelayRetryPolicy { inner_policy: self, max_total_delay }
	}

	/// Returns a new `RetryPolicy` that adds jitter(random delay) to underlying policy.
	fn with_max_jitter(self, max_jitter: Duration) -> JitteredRetryPolicy<Self> {
		JitteredRetryPolicy { inner_policy: self, max_jitter }
	}

	/// Skips retrying on errors that evaluate to `true` after applying `function`.
	fn skip_retry_on_error<F>(self, function: F) -> FilteredRetryPolicy<Self, F>
	where
		F: 'static + Fn(&Self::E) -> bool,
	{
		FilteredRetryPolicy { inner_policy: self, function }
	}
}

/// Represents the context of a retry operation.
///
/// The context holds key information about the retry operation
/// such as how many attempts have been made until now, the accumulated
/// delay between retries, and the error that triggered the retry.
pub struct RetryContext<'a, E: Error> {
	/// The number attempts made until now, before attempting the next retry.
	attempts_made: u32,

	/// The amount of artificial delay we have already waited in between previous
	/// attempts. Does not include the time taken to execute the operation.
	accumulated_delay: Duration,

	/// The error encountered in the previous attempt.
	error: &'a E,
}

/// The exponential backoff strategy is a retry approach that doubles the delay between retries.
/// A combined exponential backoff and jitter strategy is recommended that is ["Exponential Backoff and Jitter"](https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/).
/// This is helpful to avoid [Thundering Herd Problem](https://en.wikipedia.org/wiki/Thundering_herd_problem).
pub struct ExponentialBackoffRetryPolicy<E> {
	/// The base delay duration for the backoff algorithm. First retry is `base_delay` after first attempt.
	base_delay: Duration,
	// Marks the error type `E` handled by this policy without storing a value of it.
	phantom: PhantomData<E>,
}

impl<E: Error> ExponentialBackoffRetryPolicy<E> {
	/// Constructs a new instance using `base_delay`.
	///
	/// `base_delay` is the base delay duration for the backoff algorithm. First retry is `base_delay`
	/// after first attempt.
148 | pub fn new(base_delay: Duration) -> ExponentialBackoffRetryPolicy { 149 | Self { base_delay, phantom: PhantomData } 150 | } 151 | } 152 | 153 | impl RetryPolicy for ExponentialBackoffRetryPolicy { 154 | type E = E; 155 | fn next_delay(&self, context: &RetryContext) -> Option { 156 | let backoff_factor = 2_u32.pow(context.attempts_made) - 1; 157 | let delay = self.base_delay * backoff_factor; 158 | Some(delay) 159 | } 160 | } 161 | 162 | /// Decorates the given `RetryPolicy` to respect the given maximum attempts. 163 | pub struct MaxAttemptsRetryPolicy { 164 | /// The underlying retry policy to use. 165 | inner_policy: T, 166 | /// The maximum number of attempts to retry. 167 | max_attempts: u32, 168 | } 169 | 170 | impl RetryPolicy for MaxAttemptsRetryPolicy { 171 | type E = T::E; 172 | fn next_delay(&self, context: &RetryContext) -> Option { 173 | if self.max_attempts == context.attempts_made { 174 | None 175 | } else { 176 | self.inner_policy.next_delay(context) 177 | } 178 | } 179 | } 180 | 181 | /// Decorates the given `RetryPolicy` to respect the given maximum total delay. 182 | pub struct MaxTotalDelayRetryPolicy { 183 | /// The underlying retry policy to use. 184 | inner_policy: T, 185 | /// The maximum accumulated delay that will be allowed over all attempts. 186 | max_total_delay: Duration, 187 | } 188 | 189 | impl RetryPolicy for MaxTotalDelayRetryPolicy { 190 | type E = T::E; 191 | fn next_delay(&self, context: &RetryContext) -> Option { 192 | let next_delay = self.inner_policy.next_delay(context); 193 | if let Some(next_delay) = next_delay { 194 | if self.max_total_delay < context.accumulated_delay + next_delay { 195 | return None; 196 | } 197 | } 198 | next_delay 199 | } 200 | } 201 | 202 | /// Decorates the given `RetryPolicy` and adds jitter (random delay) to it. This can make retries 203 | /// more spread out and less likely to all fail at once. 204 | pub struct JitteredRetryPolicy { 205 | /// The underlying retry policy to use. 
206 | inner_policy: T, 207 | /// The maximum amount of random jitter to apply to the delay. 208 | max_jitter: Duration, 209 | } 210 | 211 | impl RetryPolicy for JitteredRetryPolicy { 212 | type E = T::E; 213 | fn next_delay(&self, context: &RetryContext) -> Option { 214 | if let Some(base_delay) = self.inner_policy.next_delay(context) { 215 | let mut rng = rand::thread_rng(); 216 | let jitter = 217 | Duration::from_micros(rng.gen_range(0..self.max_jitter.as_micros() as u64)); 218 | Some(base_delay + jitter) 219 | } else { 220 | None 221 | } 222 | } 223 | } 224 | 225 | /// Decorates the given `RetryPolicy` by not retrying on errors that match the given function. 226 | pub struct FilteredRetryPolicy { 227 | inner_policy: T, 228 | function: F, 229 | } 230 | 231 | impl RetryPolicy for FilteredRetryPolicy 232 | where 233 | T: RetryPolicy, 234 | F: Fn(&E) -> bool, 235 | E: Error, 236 | { 237 | type E = T::E; 238 | fn next_delay(&self, context: &RetryContext) -> Option { 239 | if (self.function)(&context.error) { 240 | None 241 | } else { 242 | self.inner_policy.next_delay(context) 243 | } 244 | } 245 | } 246 | -------------------------------------------------------------------------------- /src/util/storable_builder.rs: -------------------------------------------------------------------------------- 1 | use crate::crypto::chacha20poly1305::ChaCha20Poly1305; 2 | use crate::types::{EncryptionMetadata, PlaintextBlob, Storable}; 3 | use ::prost::Message; 4 | use std::borrow::Borrow; 5 | use std::io; 6 | use std::io::{Error, ErrorKind}; 7 | 8 | /// [`StorableBuilder`] is a utility to build and deconstruct [`Storable`] objects. 9 | /// It provides client-side Encrypt-then-MAC using ChaCha20-Poly1305. 10 | pub struct StorableBuilder { 11 | data_encryption_key: [u8; 32], 12 | entropy_source: T, 13 | } 14 | 15 | impl StorableBuilder { 16 | /// Constructs a new instance. 
17 | pub fn new(data_encryption_key: [u8; 32], entropy_source: T) -> StorableBuilder { 18 | Self { data_encryption_key, entropy_source } 19 | } 20 | } 21 | 22 | /// A trait representing a source for generating entropy/randomness. 23 | pub trait EntropySource { 24 | /// Fills a buffer with random bytes. 25 | /// 26 | /// This method must generate the specified number of random bytes and write them into the given 27 | /// buffer. It is expected that this method will be cryptographically secure and suitable for use 28 | /// cases requiring strong randomness, such as generating nonces or secret keys. 29 | fn fill_bytes(&self, buffer: &mut [u8]); 30 | } 31 | 32 | const CHACHA20_CIPHER_NAME: &'static str = "ChaCha20Poly1305"; 33 | 34 | impl StorableBuilder { 35 | /// Creates a [`Storable`] that can be serialized and stored as `value` in [`PutObjectRequest`]. 36 | /// 37 | /// Uses ChaCha20 for encrypting `input` and Poly1305 for generating a mac/tag. 38 | /// 39 | /// Refer to docs on [`Storable`] for more information. 40 | /// 41 | /// [`PutObjectRequest`]: crate::types::PutObjectRequest 42 | pub fn build(&self, input: Vec, version: i64) -> Storable { 43 | let mut nonce = vec![0u8; 12]; 44 | self.entropy_source.fill_bytes(&mut nonce[4..]); 45 | 46 | let mut data_blob = PlaintextBlob { value: input, version }.encode_to_vec(); 47 | 48 | let mut cipher = ChaCha20Poly1305::new(&self.data_encryption_key, &nonce, &[]); 49 | let mut tag = vec![0u8; 16]; 50 | cipher.encrypt_inplace(&mut data_blob, &mut tag); 51 | Storable { 52 | data: data_blob, 53 | encryption_metadata: Some(EncryptionMetadata { 54 | nonce, 55 | tag, 56 | cipher_format: CHACHA20_CIPHER_NAME.to_string(), 57 | }), 58 | } 59 | } 60 | 61 | /// Deconstructs the provided [`Storable`] and returns constituent decrypted data and its 62 | /// corresponding version as stored at the time of [`PutObjectRequest`]. 
63 | /// 64 | /// [`PutObjectRequest`]: crate::types::PutObjectRequest 65 | pub fn deconstruct(&self, mut storable: Storable) -> io::Result<(Vec, i64)> { 66 | let encryption_metadata = storable.encryption_metadata.unwrap(); 67 | let mut cipher = 68 | ChaCha20Poly1305::new(&self.data_encryption_key, &encryption_metadata.nonce, &[]); 69 | 70 | cipher 71 | .decrypt_inplace(&mut storable.data, encryption_metadata.tag.borrow()) 72 | .map_err(|_| Error::new(ErrorKind::InvalidData, "Invalid Tag"))?; 73 | 74 | let data_blob = PlaintextBlob::decode(&storable.data[..]) 75 | .map_err(|e| Error::new(ErrorKind::InvalidData, e))?; 76 | Ok((data_blob.value, data_blob.version)) 77 | } 78 | } 79 | 80 | #[cfg(test)] 81 | mod tests { 82 | use super::*; 83 | 84 | pub struct TestEntropyProvider; 85 | impl EntropySource for TestEntropyProvider { 86 | /// A terrible implementation which fills a buffer with bytes from a simple counter for testing 87 | /// purposes. 88 | fn fill_bytes(&self, buffer: &mut [u8]) { 89 | for (i, byte) in buffer.iter_mut().enumerate() { 90 | *byte = (i % 256) as u8; 91 | } 92 | } 93 | } 94 | 95 | #[test] 96 | fn encrypt_decrypt() { 97 | let test_entropy_provider = TestEntropyProvider; 98 | let mut data_key = [0u8; 32]; 99 | test_entropy_provider.fill_bytes(&mut data_key); 100 | let storable_builder = StorableBuilder { 101 | data_encryption_key: data_key, 102 | entropy_source: test_entropy_provider, 103 | }; 104 | let expected_data = b"secret".to_vec(); 105 | let expected_version = 8; 106 | let storable = storable_builder.build(expected_data.clone(), expected_version); 107 | 108 | let (actual_data, actual_version) = storable_builder.deconstruct(storable).unwrap(); 109 | assert_eq!(actual_data, expected_data); 110 | assert_eq!(actual_version, expected_version); 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /tests/lnurl_auth_jwt_tests.rs: 
-------------------------------------------------------------------------------- 1 | #[cfg(feature = "lnurl-auth")] 2 | mod lnurl_auth_jwt_tests { 3 | use base64::engine::general_purpose::URL_SAFE_NO_PAD; 4 | use base64::Engine; 5 | use bitcoin::bip32::Xpriv; 6 | use bitcoin::Network; 7 | use mockito::Matcher; 8 | use serde_json::json; 9 | use std::collections::HashMap; 10 | use std::time::SystemTime; 11 | use vss_client::headers::LnurlAuthToJwtProvider; 12 | use vss_client::headers::VssHeaderProvider; 13 | 14 | const APPLICATION_JSON: &'static str = "application/json"; 15 | 16 | fn lnurl_auth_response(jwt: &str) -> String { 17 | json!({ 18 | "status": "OK", 19 | "token": jwt, 20 | }) 21 | .to_string() 22 | } 23 | 24 | fn jwt_with_expiry(exp: u64) -> String { 25 | let claims = json!({ 26 | "exp": exp, 27 | }) 28 | .to_string(); 29 | let ignored = URL_SAFE_NO_PAD.encode("ignored"); 30 | let encoded = URL_SAFE_NO_PAD.encode(claims); 31 | format!("{}.{}.{}", ignored, encoded, ignored) 32 | } 33 | 34 | #[tokio::test] 35 | async fn test_lnurl_auth_jwt() { 36 | // Initialize LNURL Auth JWT provider connecting to the mock server. 37 | let addr = mockito::server_address(); 38 | let base_url = format!("http://localhost:{}", addr.port()); 39 | let parent_key = Xpriv::new_master(Network::Testnet, &[0; 32]).unwrap(); 40 | let lnurl_auth_jwt = 41 | LnurlAuthToJwtProvider::new(parent_key, base_url.clone(), HashMap::new()).unwrap(); 42 | { 43 | // First request will be provided with an expired JWT token. 
44 | let k1 = "0000000000000000000000000000000000000000000000000000000000000000"; 45 | let expired_jwt = jwt_with_expiry(0); 46 | let lnurl = mockito::mock("GET", "/") 47 | .expect(1) 48 | .with_status(200) 49 | .with_body(format!("{}/verify?tag=login&k1={}", base_url, k1)) 50 | .create(); 51 | let lnurl_verification = mockito::mock("GET", "/verify") 52 | .match_query(Matcher::AllOf(vec![ 53 | Matcher::UrlEncoded("k1".into(), k1.into()), 54 | Matcher::Regex("sig=".into()), 55 | Matcher::Regex("key=".into()), 56 | ])) 57 | .expect(1) 58 | .with_status(200) 59 | .with_header(reqwest::header::CONTENT_TYPE.as_str(), APPLICATION_JSON) 60 | .with_body(lnurl_auth_response(&expired_jwt)) 61 | .create(); 62 | assert_eq!( 63 | lnurl_auth_jwt.get_headers(&[]).await.unwrap().get("Authorization").unwrap(), 64 | &format!("Bearer {}", expired_jwt), 65 | ); 66 | lnurl.assert(); 67 | lnurl_verification.assert(); 68 | } 69 | { 70 | // Second request will be provided with a non-expired JWT token. 71 | // This will be cached. 
72 | let k1 = "1000000000000000000000000000000000000000000000000000000000000000"; 73 | let valid_jwt = jwt_with_expiry( 74 | SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs() 75 | + 60 * 60 * 24 * 365, 76 | ); 77 | let lnurl = mockito::mock("GET", "/") 78 | .expect(1) 79 | .with_status(200) 80 | .with_body(format!("{}/verify?tag=login&k1={}", base_url, k1)) 81 | .create(); 82 | let lnurl_verification = mockito::mock("GET", "/verify") 83 | .match_query(Matcher::AllOf(vec![ 84 | Matcher::UrlEncoded("k1".into(), k1.into()), 85 | Matcher::Regex("sig=".to_string()), 86 | Matcher::Regex("key=".to_string()), 87 | ])) 88 | .expect(1) 89 | .with_status(200) 90 | .with_header(reqwest::header::CONTENT_TYPE.as_str(), APPLICATION_JSON) 91 | .with_body(lnurl_auth_response(&valid_jwt)) 92 | .create(); 93 | assert_eq!( 94 | lnurl_auth_jwt.get_headers(&[]).await.unwrap().get("Authorization").unwrap(), 95 | &format!("Bearer {}", valid_jwt), 96 | ); 97 | assert_eq!( 98 | lnurl_auth_jwt.get_headers(&[]).await.unwrap().get("Authorization").unwrap(), 99 | &format!("Bearer {}", valid_jwt), 100 | ); 101 | lnurl.assert(); 102 | lnurl_verification.assert(); 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /tests/retry_tests.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod retry_tests { 3 | use std::io; 4 | use std::sync::atomic::{AtomicU32, Ordering}; 5 | use std::sync::Arc; 6 | use std::time::Duration; 7 | 8 | use vss_client::error::VssError; 9 | use vss_client::util::retry::{retry, ExponentialBackoffRetryPolicy, RetryPolicy}; 10 | 11 | #[tokio::test] 12 | async fn test_async_retry() { 13 | let base_delay = Duration::from_millis(10); 14 | let max_attempts = 3; 15 | let max_total_delay = Duration::from_secs(60); 16 | let max_jitter = Duration::from_millis(5); 17 | 18 | let exponential_backoff_jitter_policy = 
ExponentialBackoffRetryPolicy::new(base_delay) 19 | .skip_retry_on_error(|e| matches!(e, VssError::InvalidRequestError(..))) 20 | .with_max_attempts(max_attempts) 21 | .with_max_total_delay(max_total_delay) 22 | .with_max_jitter(max_jitter); 23 | 24 | let mut call_count = Arc::new(AtomicU32::new(0)); 25 | let count = call_count.clone(); 26 | let async_function = move || { 27 | let count = count.clone(); 28 | async move { 29 | let attempts_made = count.fetch_add(1, Ordering::SeqCst); 30 | if attempts_made < max_attempts - 1 { 31 | return Err(VssError::InternalServerError("Failure".to_string())); 32 | } 33 | tokio::time::sleep(Duration::from_millis(100)).await; 34 | Ok(42) 35 | } 36 | }; 37 | 38 | let result = retry(async_function, &exponential_backoff_jitter_policy).await; 39 | assert_eq!(result.ok(), Some(42)); 40 | assert_eq!(call_count.load(Ordering::SeqCst), max_attempts); 41 | 42 | call_count = Arc::new(AtomicU32::new(0)); 43 | let count = call_count.clone(); 44 | let failing_async_function = move || { 45 | let count = count.clone(); 46 | async move { 47 | count.fetch_add(1, Ordering::SeqCst); 48 | tokio::time::sleep(Duration::from_millis(100)).await; 49 | Err::<(), VssError>(VssError::InternalServerError("Failed".to_string())) 50 | } 51 | }; 52 | 53 | let failed_result = retry(failing_async_function, &exponential_backoff_jitter_policy).await; 54 | assert!(failed_result.is_err()); 55 | assert_eq!(call_count.load(Ordering::SeqCst), 3); 56 | } 57 | 58 | #[tokio::test] 59 | async fn test_retry_on_all_errors() { 60 | let retry_policy = 61 | ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)).with_max_attempts(3); 62 | 63 | let call_count = Arc::new(AtomicU32::new(0)); 64 | let count = call_count.clone(); 65 | let failing_async_function = move || { 66 | let count = count.clone(); 67 | async move { 68 | count.fetch_add(1, Ordering::SeqCst); 69 | tokio::time::sleep(Duration::from_millis(100)).await; 70 | Err::<(), 
io::Error>(io::Error::new(io::ErrorKind::InvalidData, "Failure")) 71 | } 72 | }; 73 | 74 | let failed_result = retry(failing_async_function, &retry_policy).await; 75 | assert!(failed_result.is_err()); 76 | assert_eq!(call_count.load(Ordering::SeqCst), 3); 77 | } 78 | 79 | #[tokio::test] 80 | async fn test_retry_capped_by_max_total_delay() { 81 | let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(100)) 82 | .with_max_total_delay(Duration::from_millis(350)); 83 | 84 | let call_count = Arc::new(AtomicU32::new(0)); 85 | let count = call_count.clone(); 86 | let failing_async_function = move || { 87 | let count = count.clone(); 88 | async move { 89 | count.fetch_add(1, Ordering::SeqCst); 90 | tokio::time::sleep(Duration::from_millis(100)).await; 91 | Err::<(), VssError>(VssError::InternalServerError("Failed".to_string())) 92 | } 93 | }; 94 | 95 | let failed_result = retry(failing_async_function, &retry_policy).await; 96 | assert!(failed_result.is_err()); 97 | assert_eq!(call_count.load(Ordering::SeqCst), 2); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /tests/tests.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use async_trait::async_trait; 4 | use mockito::{self, Matcher}; 5 | use prost::Message; 6 | use reqwest::header::CONTENT_TYPE; 7 | use std::collections::HashMap; 8 | use std::sync::Arc; 9 | use std::time::Duration; 10 | use vss_client::client::VssClient; 11 | use vss_client::error::VssError; 12 | use vss_client::headers::FixedHeaders; 13 | use vss_client::headers::VssHeaderProvider; 14 | use vss_client::headers::VssHeaderProviderError; 15 | 16 | use vss_client::types::{ 17 | DeleteObjectRequest, DeleteObjectResponse, ErrorCode, ErrorResponse, GetObjectRequest, 18 | GetObjectResponse, KeyValue, ListKeyVersionsRequest, ListKeyVersionsResponse, 19 | PutObjectRequest, PutObjectResponse, 20 | }; 21 | use 
vss_client::util::retry::{ExponentialBackoffRetryPolicy, RetryPolicy}; 22 | 23 | const APPLICATION_OCTET_STREAM: &'static str = "application/octet-stream"; 24 | 25 | const GET_OBJECT_ENDPOINT: &'static str = "/getObject"; 26 | const PUT_OBJECT_ENDPOINT: &'static str = "/putObjects"; 27 | const DELETE_OBJECT_ENDPOINT: &'static str = "/deleteObject"; 28 | const LIST_KEY_VERSIONS_ENDPOINT: &'static str = "/listKeyVersions"; 29 | 30 | #[tokio::test] 31 | async fn test_get() { 32 | // Spin-up mock server with mock response for given request. 33 | let base_url = mockito::server_url().to_string(); 34 | 35 | // Set up the mock request/response. 36 | let get_request = GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }; 37 | let mock_response = GetObjectResponse { 38 | value: Some(KeyValue { key: "k1".to_string(), version: 2, value: b"k1v2".to_vec() }), 39 | ..Default::default() 40 | }; 41 | 42 | // Register the mock endpoint with the mockito server. 43 | let mock_server = mockito::mock("POST", GET_OBJECT_ENDPOINT) 44 | .match_header(CONTENT_TYPE.as_str(), APPLICATION_OCTET_STREAM) 45 | .match_body(get_request.encode_to_vec()) 46 | .with_status(200) 47 | .with_body(mock_response.encode_to_vec()) 48 | .create(); 49 | 50 | // Create a new VssClient with the mock server URL. 51 | let client = VssClient::new(base_url, retry_policy()); 52 | 53 | let actual_result = client.get_object(&get_request).await.unwrap(); 54 | 55 | let expected_result = &mock_response; 56 | assert_eq!(actual_result, *expected_result); 57 | 58 | // Verify server endpoint was called exactly once. 59 | mock_server.expect(1).assert(); 60 | } 61 | 62 | #[tokio::test] 63 | async fn test_get_with_headers() { 64 | // Spin-up mock server with mock response for given request. 65 | let base_url = mockito::server_url().to_string(); 66 | 67 | // Set up the mock request/response. 
68 | let get_request = GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }; 69 | let mock_response = GetObjectResponse { 70 | value: Some(KeyValue { key: "k1".to_string(), version: 2, value: b"k1v2".to_vec() }), 71 | ..Default::default() 72 | }; 73 | 74 | // Register the mock endpoint with the mockito server and provide expected headers. 75 | let mock_server = mockito::mock("POST", GET_OBJECT_ENDPOINT) 76 | .match_header(CONTENT_TYPE.as_str(), APPLICATION_OCTET_STREAM) 77 | .match_header("headerkey", "headervalue") 78 | .match_body(get_request.encode_to_vec()) 79 | .with_status(200) 80 | .with_body(mock_response.encode_to_vec()) 81 | .create(); 82 | 83 | // Create a new VssClient with the mock server URL and fixed headers. 84 | let header_provider = Arc::new(FixedHeaders::new(HashMap::from([( 85 | "headerkey".to_string(), 86 | "headervalue".to_string(), 87 | )]))); 88 | let client = VssClient::new_with_headers(base_url, retry_policy(), header_provider); 89 | 90 | let actual_result = client.get_object(&get_request).await.unwrap(); 91 | 92 | let expected_result = &mock_response; 93 | assert_eq!(actual_result, *expected_result); 94 | 95 | // Verify server endpoint was called exactly once. 96 | mock_server.expect(1).assert(); 97 | } 98 | 99 | #[tokio::test] 100 | async fn test_put() { 101 | // Spin-up mock server with mock response for given request. 102 | let base_url = mockito::server_url().to_string(); 103 | 104 | // Set up the mock request/response. 105 | let request = PutObjectRequest { 106 | store_id: "store".to_string(), 107 | global_version: Some(4), 108 | transaction_items: vec![KeyValue { 109 | key: "k1".to_string(), 110 | version: 2, 111 | value: b"k1v3".to_vec(), 112 | }], 113 | delete_items: vec![], 114 | }; 115 | let mock_response = PutObjectResponse::default(); 116 | 117 | // Register the mock endpoint with the mockito server. 
118 | let mock_server = mockito::mock("POST", PUT_OBJECT_ENDPOINT) 119 | .match_header(CONTENT_TYPE.as_str(), APPLICATION_OCTET_STREAM) 120 | .match_body(request.encode_to_vec()) 121 | .with_status(200) 122 | .with_body(mock_response.encode_to_vec()) 123 | .create(); 124 | 125 | // Create a new VssClient with the mock server URL. 126 | let vss_client = VssClient::new(base_url, retry_policy()); 127 | let actual_result = vss_client.put_object(&request).await.unwrap(); 128 | 129 | let expected_result = &mock_response; 130 | assert_eq!(actual_result, *expected_result); 131 | 132 | // Verify server endpoint was called exactly once. 133 | mock_server.expect(1).assert(); 134 | } 135 | 136 | #[tokio::test] 137 | async fn test_delete() { 138 | // Spin-up mock server with mock response for given request. 139 | let base_url = mockito::server_url().to_string(); 140 | 141 | // Set up the mock request/response. 142 | let request = DeleteObjectRequest { 143 | store_id: "store".to_string(), 144 | key_value: Some(KeyValue { 145 | key: "k1".to_string(), 146 | version: 2, 147 | value: b"k1v3".to_vec(), 148 | }), 149 | }; 150 | let mock_response = DeleteObjectResponse::default(); 151 | 152 | // Register the mock endpoint with the mockito server. 153 | let mock_server = mockito::mock("POST", DELETE_OBJECT_ENDPOINT) 154 | .match_header(CONTENT_TYPE.as_str(), APPLICATION_OCTET_STREAM) 155 | .match_body(request.encode_to_vec()) 156 | .with_status(200) 157 | .with_body(mock_response.encode_to_vec()) 158 | .create(); 159 | 160 | // Create a new VssClient with the mock server URL. 161 | let vss_client = VssClient::new(base_url, retry_policy()); 162 | let actual_result = vss_client.delete_object(&request).await.unwrap(); 163 | 164 | let expected_result = &mock_response; 165 | assert_eq!(actual_result, *expected_result); 166 | 167 | // Verify server endpoint was called exactly once. 
168 | mock_server.expect(1).assert(); 169 | } 170 | 171 | #[tokio::test] 172 | async fn test_list_key_versions() { 173 | // Spin-up mock server with mock response for given request. 174 | let base_url = mockito::server_url().to_string(); 175 | 176 | // Set up the mock request/response. 177 | let request = ListKeyVersionsRequest { 178 | store_id: "store".to_string(), 179 | page_size: Some(5), 180 | page_token: None, 181 | key_prefix: Some("k".into()), 182 | }; 183 | 184 | let mock_response = ListKeyVersionsResponse { 185 | key_versions: vec![ 186 | KeyValue { key: "k1".to_string(), version: 3, value: vec![] }, 187 | KeyValue { key: "k2".to_string(), version: 1, value: vec![] }, 188 | ], 189 | global_version: Some(4), 190 | next_page_token: Some("k2".into()), 191 | }; 192 | 193 | // Register the mock endpoint with the mockito server. 194 | let mock_server = mockito::mock("POST", LIST_KEY_VERSIONS_ENDPOINT) 195 | .match_header(CONTENT_TYPE.as_str(), APPLICATION_OCTET_STREAM) 196 | .match_body(request.encode_to_vec()) 197 | .with_status(200) 198 | .with_body(mock_response.encode_to_vec()) 199 | .create(); 200 | 201 | // Create a new VssClient with the mock server URL. 202 | let client = VssClient::new(base_url, retry_policy()); 203 | 204 | let actual_result = client.list_key_versions(&request).await.unwrap(); 205 | 206 | let expected_result = &mock_response; 207 | assert_eq!(actual_result, *expected_result); 208 | 209 | // Verify server endpoint was called exactly once. 
210 | mock_server.expect(1).assert(); 211 | } 212 | 213 | #[tokio::test] 214 | async fn test_no_such_key_err_handling() { 215 | let base_url = mockito::server_url(); 216 | let vss_client = VssClient::new(base_url, retry_policy()); 217 | 218 | // NoSuchKeyError 219 | let error_response = ErrorResponse { 220 | error_code: ErrorCode::NoSuchKeyException.into(), 221 | message: "NoSuchKeyException".to_string(), 222 | }; 223 | let mock_server = mockito::mock("POST", GET_OBJECT_ENDPOINT) 224 | .with_status(409) 225 | .with_body(&error_response.encode_to_vec()) 226 | .create(); 227 | 228 | let get_result = vss_client 229 | .get_object(&GetObjectRequest { 230 | store_id: "store".to_string(), 231 | key: "non_existent_key".to_string(), 232 | }) 233 | .await; 234 | assert!(matches!(get_result.unwrap_err(), VssError::NoSuchKeyError { .. })); 235 | 236 | // Verify 1 request hit the server 237 | mock_server.expect(1).assert(); 238 | } 239 | 240 | #[tokio::test] 241 | async fn test_get_response_without_value() { 242 | let base_url = mockito::server_url(); 243 | let vss_client = VssClient::new(base_url, retry_policy()); 244 | 245 | // GetObjectResponse with None value 246 | let mock_response = GetObjectResponse { value: None, ..Default::default() }; 247 | let mock_server = mockito::mock("POST", GET_OBJECT_ENDPOINT) 248 | .with_status(200) 249 | .with_body(&mock_response.encode_to_vec()) 250 | .create(); 251 | 252 | let get_result = vss_client 253 | .get_object(&GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }) 254 | .await; 255 | assert!(matches!(get_result.unwrap_err(), VssError::InternalServerError { .. 
})); 256 | 257 | // Verify 3 requests hit the server: the missing-value response maps to an InternalServerError, which the retry policy retries up to max_attempts (3). 258 | mock_server.expect(3).assert(); 259 | } 260 | 261 | #[tokio::test] 262 | async fn test_invalid_request_err_handling() { 263 | let base_url = mockito::server_url(); 264 | let vss_client = VssClient::new(base_url, retry_policy()); 265 | 266 | // Invalid Request Error 267 | let error_response = ErrorResponse { 268 | error_code: ErrorCode::InvalidRequestException.into(), 269 | message: "InvalidRequestException".to_string(), 270 | }; 271 | let mock_server = mockito::mock("POST", Matcher::Any) 272 | .with_status(400) 273 | .with_body(&error_response.encode_to_vec()) 274 | .create(); 275 | 276 | let get_result = vss_client 277 | .get_object(&GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }) 278 | .await; 279 | assert!(matches!(get_result.unwrap_err(), VssError::InvalidRequestError { .. })); 280 | 281 | let put_result = vss_client 282 | .put_object(&PutObjectRequest { 283 | store_id: "store".to_string(), 284 | global_version: Some(4), 285 | transaction_items: vec![KeyValue { 286 | key: "k1".to_string(), 287 | version: 2, 288 | value: b"k1v3".to_vec(), 289 | }], 290 | delete_items: vec![], 291 | }) 292 | .await; 293 | assert!(matches!(put_result.unwrap_err(), VssError::InvalidRequestError { .. })); 294 | 295 | let delete_result = vss_client 296 | .delete_object(&DeleteObjectRequest { 297 | store_id: "store".to_string(), 298 | key_value: Some(KeyValue { 299 | key: "k1".to_string(), 300 | version: 2, 301 | value: b"k1v3".to_vec(), 302 | }), 303 | }) 304 | .await; 305 | assert!(matches!(delete_result.unwrap_err(), VssError::InvalidRequestError { .. })); 306 | 307 | let list_result = vss_client 308 | .list_key_versions(&ListKeyVersionsRequest { 309 | store_id: "store".to_string(), 310 | page_size: Some(5), 311 | page_token: None, 312 | key_prefix: Some("k".into()), 313 | }) 314 | .await; 315 | assert!(matches!(list_result.unwrap_err(), VssError::InvalidRequestError { .. 
})); 316 | 317 | // Verify 4 requests hit the server 318 | mock_server.expect(4).assert(); 319 | } 320 | 321 | #[tokio::test] 322 | async fn test_auth_err_handling() { 323 | let base_url = mockito::server_url(); 324 | let vss_client = VssClient::new(base_url, retry_policy()); 325 | 326 | // Invalid Request Error 327 | let error_response = ErrorResponse { 328 | error_code: ErrorCode::AuthException.into(), 329 | message: "AuthException".to_string(), 330 | }; 331 | let mock_server = mockito::mock("POST", Matcher::Any) 332 | .with_status(401) 333 | .with_body(&error_response.encode_to_vec()) 334 | .create(); 335 | 336 | let get_result = vss_client 337 | .get_object(&GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }) 338 | .await; 339 | assert!(matches!(get_result.unwrap_err(), VssError::AuthError { .. })); 340 | 341 | let put_result = vss_client 342 | .put_object(&PutObjectRequest { 343 | store_id: "store".to_string(), 344 | global_version: Some(4), 345 | transaction_items: vec![KeyValue { 346 | key: "k1".to_string(), 347 | version: 2, 348 | value: b"k1v3".to_vec(), 349 | }], 350 | delete_items: vec![], 351 | }) 352 | .await; 353 | assert!(matches!(put_result.unwrap_err(), VssError::AuthError { .. })); 354 | 355 | let delete_result = vss_client 356 | .delete_object(&DeleteObjectRequest { 357 | store_id: "store".to_string(), 358 | key_value: Some(KeyValue { 359 | key: "k1".to_string(), 360 | version: 2, 361 | value: b"k1v3".to_vec(), 362 | }), 363 | }) 364 | .await; 365 | assert!(matches!(delete_result.unwrap_err(), VssError::AuthError { .. })); 366 | 367 | let list_result = vss_client 368 | .list_key_versions(&ListKeyVersionsRequest { 369 | store_id: "store".to_string(), 370 | page_size: Some(5), 371 | page_token: None, 372 | key_prefix: Some("k".into()), 373 | }) 374 | .await; 375 | assert!(matches!(list_result.unwrap_err(), VssError::AuthError { .. 
})); 376 | 377 | // Verify 4 requests hit the server 378 | mock_server.expect(4).assert(); 379 | } 380 | 381 | struct FailingHeaderProvider {} 382 | 383 | #[async_trait] 384 | impl VssHeaderProvider for FailingHeaderProvider { 385 | async fn get_headers( 386 | &self, _request: &[u8], 387 | ) -> Result, VssHeaderProviderError> { 388 | Err(VssHeaderProviderError::InvalidData { error: "test".to_string() }) 389 | } 390 | } 391 | 392 | #[tokio::test] 393 | async fn test_header_provider_error() { 394 | let get_request = GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }; 395 | let header_provider = Arc::new(FailingHeaderProvider {}); 396 | let client = 397 | VssClient::new_with_headers("notused".to_string(), retry_policy(), header_provider); 398 | let result = client.get_object(&get_request).await; 399 | 400 | assert!(matches!(result, Err(VssError::AuthError { .. }))); 401 | } 402 | 403 | #[tokio::test] 404 | async fn test_conflict_err_handling() { 405 | let base_url = mockito::server_url(); 406 | let vss_client = VssClient::new(base_url, retry_policy()); 407 | 408 | // Conflict Error 409 | let error_response = ErrorResponse { 410 | error_code: ErrorCode::ConflictException.into(), 411 | message: "ConflictException".to_string(), 412 | }; 413 | let mock_server = mockito::mock("POST", Matcher::Any) 414 | .with_status(409) 415 | .with_body(&error_response.encode_to_vec()) 416 | .create(); 417 | 418 | let put_result = vss_client 419 | .put_object(&PutObjectRequest { 420 | store_id: "store".to_string(), 421 | global_version: Some(4), 422 | transaction_items: vec![KeyValue { 423 | key: "k1".to_string(), 424 | version: 2, 425 | value: b"k1v3".to_vec(), 426 | }], 427 | delete_items: vec![], 428 | }) 429 | .await; 430 | assert!(matches!(put_result.unwrap_err(), VssError::ConflictError { .. 
})); 431 | 432 | // Verify 1 request hit the server (ConflictError is in the retry policy's skip list, so no retries occur). 433 | mock_server.expect(1).assert(); 434 | } 435 | 436 | #[tokio::test] 437 | async fn test_internal_server_err_handling() { 438 | let base_url = mockito::server_url(); 439 | let vss_client = VssClient::new(base_url, retry_policy()); 440 | 441 | // Internal Server Error 442 | let error_response = ErrorResponse { 443 | error_code: ErrorCode::InternalServerException.into(), 444 | message: "InternalServerException".to_string(), 445 | }; 446 | let mock_server = mockito::mock("POST", Matcher::Any) 447 | .with_status(500) 448 | .with_body(&error_response.encode_to_vec()) 449 | .create(); 450 | 451 | let get_result = vss_client 452 | .get_object(&GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }) 453 | .await; 454 | assert!(matches!(get_result.unwrap_err(), VssError::InternalServerError { .. })); 455 | 456 | let put_result = vss_client 457 | .put_object(&PutObjectRequest { 458 | store_id: "store".to_string(), 459 | global_version: Some(4), 460 | transaction_items: vec![KeyValue { 461 | key: "k1".to_string(), 462 | version: 2, 463 | value: b"k1v3".to_vec(), 464 | }], 465 | delete_items: vec![], 466 | }) 467 | .await; 468 | assert!(matches!(put_result.unwrap_err(), VssError::InternalServerError { .. })); 469 | 470 | let delete_result = vss_client 471 | .delete_object(&DeleteObjectRequest { 472 | store_id: "store".to_string(), 473 | key_value: Some(KeyValue { 474 | key: "k1".to_string(), 475 | version: 2, 476 | value: b"k1v3".to_vec(), 477 | }), 478 | }) 479 | .await; 480 | assert!(matches!(delete_result.unwrap_err(), VssError::InternalServerError { .. })); 481 | 482 | let list_result = vss_client 483 | .list_key_versions(&ListKeyVersionsRequest { 484 | store_id: "store".to_string(), 485 | page_size: Some(5), 486 | page_token: None, 487 | key_prefix: Some("k".into()), 488 | }) 489 | .await; 490 | assert!(matches!(list_result.unwrap_err(), VssError::InternalServerError { .. 
})); 491 | 492 | // Verify 12 requests hit the server: 4 API calls, each retried up to 3 attempts because InternalServerError is retryable. 493 | mock_server.expect(12).assert(); 494 | } 495 | 496 | #[tokio::test] 497 | async fn test_internal_err_handling() { 498 | let base_url = mockito::server_url(); 499 | let vss_client = VssClient::new(base_url, retry_policy()); 500 | 501 | let error_response = 502 | ErrorResponse { error_code: 999, message: "UnknownException".to_string() }; 503 | let mut _mock_server = mockito::mock("POST", Matcher::Any) 504 | .with_status(999) 505 | .with_body(&error_response.encode_to_vec()) 506 | .create(); 507 | 508 | let get_request = GetObjectRequest { store_id: "store".to_string(), key: "k1".to_string() }; 509 | let get_result = vss_client.get_object(&get_request).await; 510 | assert!(matches!(get_result.unwrap_err(), VssError::InternalError { .. })); 511 | 512 | let put_request = PutObjectRequest { 513 | store_id: "store".to_string(), 514 | global_version: Some(4), 515 | transaction_items: vec![KeyValue { 516 | key: "k1".to_string(), 517 | version: 2, 518 | value: b"k1v3".to_vec(), 519 | }], 520 | delete_items: vec![], 521 | }; 522 | let put_result = vss_client.put_object(&put_request).await; 523 | assert!(matches!(put_result.unwrap_err(), VssError::InternalError { .. })); 524 | 525 | let list_request = ListKeyVersionsRequest { 526 | store_id: "store".to_string(), 527 | page_size: Some(5), 528 | page_token: None, 529 | key_prefix: Some("k".into()), 530 | }; 531 | let list_result = vss_client.list_key_versions(&list_request).await; 532 | assert!(matches!(list_result.unwrap_err(), VssError::InternalError { .. })); 533 | 534 | let malformed_error_response = b"malformed"; 535 | _mock_server = mockito::mock("POST", Matcher::Any) 536 | .with_status(409) 537 | .with_body(&malformed_error_response) 538 | .create(); 539 | 540 | let get_malformed_err_response = vss_client.get_object(&get_request).await; 541 | assert!(matches!(get_malformed_err_response.unwrap_err(), VssError::InternalError { .. 
})); 542 | 543 | let put_malformed_err_response = vss_client.put_object(&put_request).await; 544 | assert!(matches!(put_malformed_err_response.unwrap_err(), VssError::InternalError { .. })); 545 | 546 | let list_malformed_err_response = vss_client.list_key_versions(&list_request).await; 547 | assert!(matches!(list_malformed_err_response.unwrap_err(), VssError::InternalError { .. })); 548 | 549 | // Requests to endpoints are no longer mocked and will result in network error. 550 | drop(_mock_server); 551 | 552 | let get_network_err = vss_client.get_object(&get_request).await; 553 | assert!(matches!(get_network_err.unwrap_err(), VssError::InternalError { .. })); 554 | 555 | let put_network_err = vss_client.put_object(&put_request).await; 556 | assert!(matches!(put_network_err.unwrap_err(), VssError::InternalError { .. })); 557 | 558 | let list_network_err = vss_client.list_key_versions(&list_request).await; 559 | assert!(matches!(list_network_err.unwrap_err(), VssError::InternalError { .. })); 560 | } 561 | 562 | fn retry_policy() -> impl RetryPolicy { 563 | ExponentialBackoffRetryPolicy::new(Duration::from_millis(1)) 564 | .with_max_attempts(3) 565 | .skip_retry_on_error(|e| { 566 | matches!( 567 | e, 568 | VssError::NoSuchKeyError(..) 569 | | VssError::InvalidRequestError(..) 570 | | VssError::ConflictError(..) 571 | | VssError::AuthError(..) 572 | ) 573 | }) 574 | } 575 | } 576 | --------------------------------------------------------------------------------