├── .cargo └── config.toml ├── .github └── workflows │ └── lint_and_test.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── graphviz_architecture.dot └── src ├── backend.rs ├── common.rs ├── constants.rs ├── group.rs ├── lib.rs ├── main.rs ├── repo.rs └── rpc.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = [ 3 | "-A", "unused-imports", 4 | "-A", "unused-variables", 5 | "-A", "unused-mut", 6 | "-A", "dead-code", 7 | "-A", "clippy::clone-on-copy", 8 | "-A", "clippy::needless-borrows-for-generic_args", 9 | ] 10 | -------------------------------------------------------------------------------- /.github/workflows/lint_and_test.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | lint_and_test: 7 | runs-on: ${{ matrix.os }} 8 | strategy: 9 | matrix: 10 | os: [ubuntu-latest, macOS-latest] 11 | rust: [stable] 12 | 13 | steps: 14 | - name: Set up Rust toolchain 15 | uses: hecrj/setup-rust-action@v2 16 | with: 17 | rust-version: ${{ matrix.rust }} 18 | 19 | - name: Check out the code 20 | uses: actions/checkout@v4 21 | 22 | - name: Install Clippy 23 | run: rustup component add clippy 24 | 25 | - name: Run Clippy 26 | run: cargo clippy --all-targets --all-features -- -D warnings 27 | 28 | - name: Run tests 29 | env: 30 | RUST_MIN_STACK: 8388608 31 | run: cargo test --verbose -- --test-threads=1 --nocapture 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | 16 | 17 | # Added by cargo 18 | 19 | /target 20 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "save-dweb-backend" 3 | version = "0.1.3" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | iroh = "0.24.0" 8 | iroh-blobs = "0.24.0" 9 | veilid-core = { git = "https://gitlab.com/veilid/veilid.git", version = "0.4.3" } 10 | veilid-iroh-blobs = { git = "https://github.com/RangerMauve/veilid-iroh-blobs.git", version = "0.1.1" } 11 | tracing = "0.1" 12 | xdg = "2.4" 13 | tmpdir = "1" 14 | serde = "1.0.204" 15 | serde_cbor = "0.11.2" 16 | clap = { version = "4.5.9", features = ["derive"] } 17 | anyhow = "1.0.86" 18 | tokio = {version ="1.38.1", features=["full"] } 19 | tokio-stream = "0.1.15" 20 | async-stream = "0.3.5" 21 | futures = "0.3.31" 22 | futures-core = "0.3.31" 23 | futures-util = "0.3.31" 24 | bytes = "1.6.1" 25 | serial_test = "3.1.1" 26 | url = "2.5.2" 27 | hex = "0.4.3" 28 | rand = "0.8.5" 29 | base64 = "0.22.1" 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 
5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. 
To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 
133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # *Save* DWeb Backend Technical Documentation 3 | 4 | 5 | ## 1.0 Introduction 6 | 7 | OpenArchive’s *Save *application offers several storage options for archiving media via a mobile device. This documentation outlines the technical details for the DWeb Backend (working title *Raven*), a peer-to-peer (P2P), decentralized backend that ensures secure and efficient data management. 8 | 9 | 10 | ## 1.1 Overview 11 | 12 | P2P networks are decentralized systems where nodes (a.k.a. peers) function as both clients and servers, enabling direct resource sharing and collaboration without relying on centralized infrastructure. Typically, each participating node maintains equal privileges and can initiate or fulfill requests for data, bandwidth, or other network services. A key benefit of a P2P network is that its decentralized architecture removes the risk of a single point of failure or control found in centralized systems. 13 | 14 | The *Save* DWeb Backend relies on two P2P protocols, [Veilid](https://veilid.com/) and [Iroh](https://www.iroh.computer/), selected for their different strengths. Veilid offers secure peer discovery, connections, and public key cryptography. It provides encrypted and anonymous P2P connections by routing traffic through multiple peers in a similar setup to TOR. Iroh is used for blob replication and data storage. It handles collections of data that change over time, ensuring reliable data management with integrity verification. 15 | 16 | 17 | ## 1.2 Key Concepts & Terminology 18 | 19 | 20 | 21 | * **Peer-to-Peer (P2P)**: A decentralized network architecture where each participant (peer) can act as both a client and a server. 22 | * **Distributed Hash Table (DHT)**: A decentralized data structure that allows for the efficient storage and retrieval of key-value pairs across a network of peers. 23 | * **Blob**: A binary large object, typically used to store files or data collections in the backend. 24 | * **Veilid**: A protocol used for secure peer discovery and communication. 25 | * **Iroh**: A library for blob replication and data storage. 
26 | * **Route ID**: A unique identifier for communication paths between peers. 27 | * **AppCall**: Veilid’s encrypted message system. 28 | * **Tunnel**: Multiplexed connection system used for peer communication. 29 | 30 | 31 | ## 1.3 Complementary resources 32 | 33 | 34 | 35 | * [Save DWeb Backend Privacy Overview and Risk Assessment](https://hackmd.io/@s74XZjUBQDuPPS04AgPvow/HkacodHgyx) 36 | * [Veilid’s Rust Documentation](https://docs.rs/veilid-core/latest/veilid_core/) for the Veilid-core library 37 | * [Iroh’s Rust Documentation](https://docs.rs/iroh/latest/iroh/) for the Iroh library 38 | 39 | 40 | ## 2.0 Architecture 41 | 42 | 43 | ### Save App Architecture 44 | 45 | ```mermaid 46 | graph TD; 47 | A["Android Kotlin"] -->|RPC| B["P2P Daemon Rust"]; 48 | C["iOS Swift"] -->|RPC| B; 49 | B --> D["Data Repo"]; 50 | D --> E["Personal Data"]; 51 | D --> F["External"]; 52 | G["Sync Admin"] -->|View/Remove
Via veilid?| H["Sync Server"]; 53 | B -.->|Code reuse| H; 54 | I --> H; 55 | H --> I["P2P Sync Group"]; 56 | I -->|View, Replicate| J["Other peers"]; 57 | J -->|Add archives| I; 58 | ``` 59 | 60 | P2P Data Synchronization and Replication Architecture 61 | 62 | 63 | ### P2P Connections 64 | 65 | ```mermaid 66 | graph TD; 67 | A["groupId"] -->|Find peers| B["veilidKeyValue"]; 68 | B --> C["veilidTunnels"]; 69 | C --> D["irohStorageVerification"]; 70 | D --> E["irohDocs"]; 71 | E --> F["groupDoc"]; 72 | F --> G["personalRepo"]; 73 | G --> H["CBORFileList"]; 74 | D --> H; 75 | ``` 76 | 77 | Group Data Retrieval and Verification Flow 78 | 79 | 80 | ## 2.1 High-Level Design 81 | 82 | 83 | ### Introduction 84 | 85 | `save-dweb-backend` is a decentralized web storage backend for the *Save* app with these core components: 86 | 87 | 88 | 89 | * `Backend`: the core system that manages the initialization and lifecycle of the Veilid instance, groups, and data repositories. ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/backend.rs#L90-L111)) 90 | * `Group`: a collection of peers identified by a Veilid DHT record keypair and encryption key, used to manage shared data repositories and secure communications. ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/group.rs#L35-L43)) 91 | * `Repo`: a data store within a group, identified by a Veilid DHT record keypair and encrypted with the group's shared secret. ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/repo.rs#L25-L33)) 92 | 93 | 94 | ### Basic Flow 95 | 96 | 97 | 98 | 1. Initialization: The backend sets up storage and loads known groups. 99 | 2. Groups: Users create or join groups with cryptographic keys. 100 | 3. Repositories: Each user in a group holds their data and route ID in a Repo which is linked to from the group. 101 | 4. Peer Communication: Veilid handles secure connections; Iroh provides data verification. 102 | 103 | 104 | ## 2.2 Groups 105 | 106 | Groups are the fundamental organizational unit in the system, enabling secure peer discovery and shared data access. 107 | 108 | 109 | ## 2.2.1 Group operations 110 | 111 | 112 | ### Create Group: 113 | 114 | 115 | 116 | * Generate a Veilid DHT record with ED25519 keypair and signed metadata 117 | * Generate a random shared secret using chacha20poly1305 encryption 118 | * The group is identified by its DHT record key, owner keypair, and encryption shared secret 119 | 120 | ([Source](https://github.com/OpenArchive/save-dweb-backend/blob/main/src/backend.rs#L343-L382)) 121 | 122 | 123 | 124 | ### Join Group: 125 | 126 | 127 | 128 | * Use provided CommonKeypair or invite URL containing DHT key, owner keypair, and shared secret 129 | * Initialize Veilid DHT record using record ID and owner keypair 130 | * Use shared secret to decrypt group name and repository list 131 | * Add group to known groups list 132 | 133 | ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/backend.rs#L296-L341)) 134 | 135 | 136 | 137 | ## 2.2.2 Group structure 138 | 139 | `Group` struct manages decentralized storage, networking, and encryption, integrating a DHT record, encryption key, routing context, and APIs for Veilid and Iroh blobs, with a mutex-protected repo map. 
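Schematically, the struct described above maps to something like the sketch below. All field names and types here are illustrative stand-ins rather than the project’s real definitions (which are linked just after), and the placeholder types are declared locally so the sketch compiles on its own.

```rust
// Illustrative stand-in for the real Group struct in group.rs.
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;

type DhtRecordKey = [u8; 32]; // key of the group's DHT record (placeholder)
type SharedSecret = [u8; 32]; // group encryption key (placeholder)
type RepoId = [u8; 32];

struct RoutingContext;  // stands in for Veilid's routing context
struct VeilidAPI;       // stands in for the Veilid API handle
struct VeilidIrohBlobs; // stands in for the veilid-iroh-blobs API
struct Repo;            // stands in for the real Repo type

struct Group {
    dht_record: DhtRecordKey,
    encryption_key: SharedSecret,
    routing_context: Arc<RoutingContext>,
    veilid: Arc<VeilidAPI>,
    iroh_blobs: VeilidIrohBlobs,
    // Known repos in the group, guarded by a mutex for concurrent access.
    repos: Mutex<HashMap<RepoId, Repo>>,
}
```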
140 | 141 | The full struct definition is in the [Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/group.rs#L45-L60). 142 | 143 | 144 | ### DHT Record Format: 145 | 146 | 147 | 148 | * Subkey 0: Reserved for group name 149 | * Subkeys 1+: Used by members to register their Data Repository keys 150 | * All values are encrypted with the group's shared secret 151 | 152 | 153 | ## 2.2.3 Group membership management 154 | 155 | 156 | ### Discovery Process: 157 | 158 | 159 | 160 | * Peers look up other group members’ Repos in the DHT 161 | * Peers’ Veilid Route IDs are looked up from their Repos 162 | * AppCalls are sent to members’ route IDs for replication 163 | * Multiple “tunnels” are multiplexed over a single route ID 164 | * Tunnels are exposed as a duplex binary stream for replication 165 | * New peers request a sync of Iroh blobs 166 | 167 | 168 | ## 2.2.4 Data Management 169 | 170 | 171 | 172 | * **Personal Data**: CBOR-encoded list of file names mapping to file hashes 173 | * **External Data**: Peers track other peers’ data repositories and can optionally download blobs for backups. 174 | 175 | 176 | ## 2.3 Network Topology 177 | 178 | ```mermaid 179 | graph TD; 180 | A["BOOTSTRAP"] -->|Node List| B["YOU!"]; 181 | B -->|Find Self| A; 182 | 183 | B -->|Ping| C["HEADLESS"]; 184 | C -->|Status| B; 185 | B -->|Status| D["DWEB BACKEND DAEMON"]; 186 | D -->|Ping| B; 187 | D -->|Gossip via app calls| E["SYNC GROUP"]; 188 | 189 | C -->|Signal| F["MOBILE APP"]; 190 | F -->|Signal| C; 191 | 192 | G -->|View, Replicate| E; 193 | E -->|Add archives| G["PEER"]; 194 | ``` 195 | 196 | Decentralized Web Synchronization and Communication Flow 197 | 198 | 199 | ## 2.4 Data Flow 200 | 201 | 202 | ### Data Flow Diagram 203 | 204 | ```mermaid 205 | graph TD; 206 | A["Backend Initialization"] --> B["Group Management"]; 207 | B --> C["Repository Operations"]; 208 | B --> D["Create/Join Groups"]; 209 | B --> E["Manage Members"]; 210 | 211 | C --> F["Peer Communication"]; 212 | C --> G["Create Repos"]; 213 | 214 | F --> H["Route Updates"]; 215 | F --> I["Data Replication"]; 216 | 217 | G --> J["File Transfer"]; 218 | 219 | D --> K["Update Routes"]; 220 | 221 | ``` 222 | 223 | Data flow diagram illustrating the interactions between the core components 224 | 225 | 226 | ### Summary 227 | 228 | The data flow in the `save-dweb-backend` project involves the initialization of the backend, management of groups and repositories, and secure communication between peers. Each component interacts with others to ensure seamless data management and peer communication. 229 | 230 | 231 | ## 3.0 Core Features 232 | 233 | 234 | ## 3.1 Peer Discovery 235 | 236 | Peer discovery is performed on individual groups. A lookup is done on the group DHT record to list Repos. Then, each repo DHT record is queried for that member’s name, Route ID, and collection hash. These are all encrypted with the group secret key. The name is a human-readable name for the member, the Route ID is used to facilitate secure peer-to-peer connections, and the collection hash is the Iroh Blob hash representing the file list for the user. 237 | 238 | 239 | ## 3.1.1 Backend Initialization 240 | 241 | When the Backend is started, the Veilid and Iroh components are initialized. It sets up storage and loads previously saved groups and repositories by iterating through a list of known group IDs and initializing groups from disk. From there, each group initializes by attempting to load the user’s Repo, adding it to the DHT if it is not already present. ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/backend.rs#L113-L156))
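The discovery pass from section 3.1 can be sketched as a small function. Everything here is illustrative: the struct fields mirror what the documentation says a repo record exposes, and the two callbacks stand in for reading a subkey from the group’s DHT record and decrypting it with the group’s shared secret (the real logic lives in `group.rs` and `repo.rs`).

```rust
/// What peer discovery learns about one group member (illustrative names).
struct MemberInfo {
    name: String,              // human-readable member name
    route_id_blob: Vec<u8>,    // blob used to open tunnels to the member
    collection_hash: [u8; 32], // Iroh hash of the member's CBOR file list
}

/// Walk the group record's subkeys and collect every member entry that can be
/// decrypted. Subkey 0 holds the group name; repos live in subkeys 1 and up.
fn discover_members<R, D>(subkey_count: u32, read_subkey: R, decrypt: D) -> Vec<MemberInfo>
where
    R: Fn(u32) -> Option<Vec<u8>>,      // raw (encrypted) subkey value, if set
    D: Fn(&[u8]) -> Option<MemberInfo>, // decrypt and parse one repo entry
{
    (1..subkey_count)
        .filter_map(|i| read_subkey(i))
        .filter_map(|raw| decrypt(raw.as_slice()))
        .collect()
}
```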
242 | 243 | 244 | ## 3.1.2 Repository Operations 245 | 246 | 247 | 248 | * Creation: Users create DHT records with private keypairs after loading a group 249 | * Registration: Repository DHT keys are added to the group's subkeys 250 | * File Operations: Create/Read/Update/Delete operations for files add data to Iroh, update the file list, and then publish the new file collection hash to the DHT 251 | * Routing: The `update_route_on_dht` method ensures repositories remain discoverable by publishing the current Veilid route ID onto the DHT 252 | 253 | ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/repo.rs#L62-L77)) 254 | 255 | 256 | 257 | ## 3.1.3 File Management 258 | 259 | 260 | 261 | * Format: Files are uploaded to Iroh and stored as a CBOR-encoded `HashMap<String, Hash>` ([Source](https://github.com/rangermauve/veilid-iroh-blobs/blob/default/src/iroh.rs#L38)) 262 | * String: file path within dataset 263 | * Hash: 32-byte Blake3 iroh hash (raw bytes) 264 | * This encoded data is then stored in Iroh, and the Blake3 hash is used to represent the current state of the file list. 265 | * State Updates: 266 | 1. Get current hash or create empty file list 267 | 2. Load and parse HashMap from Iroh 268 | 3. Modify HashMap as needed 269 | 4. Encode to CBOR bytes and save to Iroh 270 | 5. Store latest hash on DHT ([Source](https://github.com/rangermauve/veilid-iroh-blobs/blob/default/src/iroh.rs#L683-L737)) 271 | 272 |
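To make the format concrete, here is a minimal sketch of encoding and decoding such a file list with the `serde_cbor` crate already listed in Cargo.toml. The real code stores Iroh `Hash` values as raw bytes; a plain `[u8; 32]` and an invented file path stand in for them here so the example stays dependency-free.

```rust
use std::collections::HashMap;

fn main() -> Result<(), serde_cbor::Error> {
    // The file list: file path within the dataset -> 32-byte Blake3 hash.
    let mut file_list: HashMap<String, [u8; 32]> = HashMap::new();
    file_list.insert("photos/2024/example.jpg".to_string(), [0u8; 32]);

    // Encode to CBOR bytes; this is the blob that gets saved back into Iroh,
    // and its hash becomes the new collection hash published to the DHT.
    let bytes = serde_cbor::to_vec(&file_list)?;

    // Loading works in reverse: fetch the blob for the current collection
    // hash from Iroh, parse it, then modify the map as needed.
    let mut decoded: HashMap<String, [u8; 32]> = serde_cbor::from_slice(&bytes)?;
    decoded.remove("photos/2024/example.jpg");

    println!("{} entries after removal", decoded.len());
    Ok(())
}
```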
273 | ## 3.2 Data Transmission 274 | 275 | Veilid provides a way to create routes that can receive messages. However, creating many routes is computationally expensive, so it is better to reuse a single route ID. On top of this foundation we built a way to multiplex several connections from several peers. 276 | 277 | Tunnels are identified using the route ID of the sender and an unsigned 32-bit integer. This allows peers to open multiple tunnels to others by incrementing the 32-bit integer. ([Source](https://github.com/rangermauve/veilid-iroh-blobs/blob/default/src/tunnels.rs#L50-L66)) 278 | 279 | 280 | #### Tunnels - Wire 281 | 282 | Messages are sent using Veilid AppCalls to routes. Each message is prefixed with a 32-bit unsigned integer, followed by 32 bytes for the route ID. The rest of the AppCall is the actual contents of the packet for that tunnel. 283 | 284 | We use AppCalls to ensure ordering for tunnels. Each message for a tunnel is sent one at a time and waits for an AppCallResponse from the recipient before the next message is sent. Multiple tunnels can send at the same time without needing to wait for each other. 285 | 286 | The first message (PING) sent through a tunnel contains the bytes [0x07, 0x02, 0x08, 0x03] (SAVE on a phone dial pad) followed by the route ID blob needed to register the route with Veilid. When a peer gets a tunnel ID it has not seen before, it should check whether the message contains the PING and, if not, ignore the tunnel. If the PING is present, the application should register the tunnel and listen for subsequent messages. The Route ID from the tunnel ID is where responses must be sent. 287 | 288 |
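The framing itself is simple enough to sketch in plain Rust. This is an illustration of the layout described above rather than the project’s actual implementation; in particular, the byte order of the tunnel ID is an assumption here, so treat `tunnels.rs` in veilid-iroh-blobs as the authoritative reference.

```rust
const PING_MAGIC: [u8; 4] = [0x07, 0x02, 0x08, 0x03]; // "SAVE" on a phone dial pad

/// One AppCall payload for a tunnel: u32 tunnel ID + 32-byte sender route ID
/// + the tunnel's message bytes.
fn frame_message(tunnel_id: u32, sender_route_id: &[u8; 32], payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(4 + 32 + payload.len());
    out.extend_from_slice(&tunnel_id.to_le_bytes()); // endianness is assumed here
    out.extend_from_slice(sender_route_id);
    out.extend_from_slice(payload);
    out
}

/// The first message on a new tunnel is a PING carrying the sender's route ID
/// blob so the receiver can register the route with Veilid and reply.
fn frame_ping(tunnel_id: u32, sender_route_id: &[u8; 32], route_id_blob: &[u8]) -> Vec<u8> {
    let mut ping = Vec::with_capacity(PING_MAGIC.len() + route_id_blob.len());
    ping.extend_from_slice(&PING_MAGIC);
    ping.extend_from_slice(route_id_blob);
    frame_message(tunnel_id, sender_route_id, &ping)
}

/// Receivers check for the PING on unknown tunnel IDs and ignore the tunnel if absent.
fn is_ping(payload: &[u8]) -> bool {
    payload.len() >= PING_MAGIC.len() && payload[..4] == PING_MAGIC
}
```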
289 | ```mermaid 290 | sequenceDiagram 291 | participant AppA 292 | participant TunnelsA 293 | participant TunnelsB 294 | participant AppB 295 | 296 | AppA->>TunnelsA: Open New Tunnel to RouteIDB:Blob 297 | note right of TunnelsA: Register RouteIDB:Blob<br>with Veilid and get RouteIDB 298 | note right of TunnelsA: Create Tunnel ID<br>(u32:RouteIDA) 299 | 300 | TunnelsA->>TunnelsB: (u32:RouteIDA)PING(RouteIDA:Blob) 301 | TunnelsA->>AppA: New Tunnel (u32:RouteIDB) 302 | 303 | note right of TunnelsB: Verify PING 304 | note right of TunnelsB: Register RouteIDA:Blob 305 | 306 | TunnelsB->>AppB: New Tunnel (u32:RouteIDA) 307 | AppB->>TunnelsB: Send BYTES to (u32:RouteIDA) 308 | 309 | TunnelsB->>TunnelsA: (u32:RouteIDB)(BYTES) 310 | AppA->>TunnelsA: New data (u32:RouteIDB): BYTES 311 | 312 | ``` 313 | Tunnels and RouteIDs communication flow in Veilid 314 | 315 | 316 | ## 3.3 Security Implementation 317 | 318 | 319 | ### Encryption 320 | 321 | In-transit encryption is handled by Veilid’s routes. Connections use Onion Routing to send packets through at least one other node on each side, thus hiding the IP address of the sender and receiver from each other and preventing intermediate nodes from knowing the full route a packet will take. 322 | 323 | 324 | 325 | * Implements AEAD (Authenticated Encryption with Associated Data) encryption 326 | * Uses random nonces for each encryption operation 327 | * Combines encryption key with nonce for secure message encryption 328 | * Provides authentication of encrypted data 329 | 330 | 331 | ### Route Management 332 | 333 | Routes are created using Veilid’s routing context API. During route creation, our system attempts to create new custom private routes up to 6 times ([Source](https://github.com/rangermauve/veilid-iroh-blobs/blob/default/src/lib.rs#L988-L1005)) before returning an error. We use the [low latency](https://docs.rs/veilid-core/latest/veilid_core/enum.Stability.html#variant.LowLatency) stability preference for routes because it has been the fastest and most reliable option in our testing. We use the NoPreference sequencing option in order to allow for both TCP and UDP connections, which have different likelihoods of connecting in different network environments. 334 | 335 | 336 | ## 3.4 Data Replication Protocol 337 | 338 | 339 | ### Message Protocol 340 | 341 | In order to replicate data, we open Tunnels to other peers and then send messages prefixed by Command bytes specifying the type of message being sent. 342 | 343 | Command bytes for operations: 344 | 345 | 346 | 347 | * `0x00` (NO): Hash not found 348 | * `0x01` (YES): Hash exists 349 | * `0x10` (HAS): Query hash existence 350 | * `0x11` (ASK): Request hash data 351 | * `0x20` (DATA): Data chunk transfer 352 | * `0x22` (DONE): Transfer complete 353 | * `0xF0` (ERR): Error occurred 354 | 355 | ([Source](https://github.com/rangermauve/veilid-iroh-blobs/blob/default/src/iroh.rs#L40-L46)) 356 | 357 | The replication process (sketched in code after this list): 358 | 359 | * Check if we have the data locally and return it if it exists. 360 | * Downloading starts by sending an ASK request and waiting for a HAS response. 361 | * After the HAS response, the peer will send a series of messages starting with DATA followed by a chunk of the raw data. 362 | * Once all the data has been sent, the peer sends a DONE message and stops replicating the Tunnel. 363 | * This data stream is ingested into the local Iroh store. 364 | * Once the data stream is finished, we verify the final hash from Iroh. 365 | * If it does not match, the data is thrown out and replication is started with another peer. 366 | * When replicating, we first ask for a Repo’s collection hash in order to get the hashes for its file list. 367 | * Afterwards we may initiate downloads for individual files by first looking up their hash in the collection. 368 | * Replication connects to peers in the group in random order until one has the data, thus ensuring we don’t need a specific peer to be online as long as somebody has a copy. 369 | * Implements failure handling with peer fallback 370 | ([Source](https://github.com/rangermauve/veilid-iroh-blobs/blob/default/src/iroh.rs#L450-L550))
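As a concrete illustration, the command bytes above can be written down as constants together with a tiny dispatcher showing how a download loop might react to each one. The constants mirror the table; the function shapes and error handling are hypothetical simplifications of the real code in `iroh.rs`.

```rust
// Command bytes from the table above.
const NO: u8 = 0x00;   // hash not found
const YES: u8 = 0x01;  // hash exists
const HAS: u8 = 0x10;  // query hash existence
const ASK: u8 = 0x11;  // request hash data
const DATA: u8 = 0x20; // data chunk transfer
const DONE: u8 = 0x22; // transfer complete
const ERR: u8 = 0xF0;  // error occurred

/// Ask whether a peer has a hash: command byte followed by the 32-byte hash.
fn encode_has(hash: &[u8; 32]) -> Vec<u8> {
    let mut msg = vec![HAS];
    msg.extend_from_slice(hash);
    msg
}

/// Request the full data for a hash.
fn encode_ask(hash: &[u8; 32]) -> Vec<u8> {
    let mut msg = vec![ASK];
    msg.extend_from_slice(hash);
    msg
}

/// Handle one incoming message on a replication tunnel, accumulating chunks.
/// Returns Ok(true) once the transfer is complete (hash verification follows).
fn handle_message(msg: &[u8], chunks: &mut Vec<Vec<u8>>) -> Result<bool, String> {
    match msg.first().copied() {
        Some(DATA) => {
            chunks.push(msg[1..].to_vec()); // one raw data chunk
            Ok(false)
        }
        Some(DONE) => Ok(true),
        Some(YES) => Ok(false), // peer confirmed it has the hash
        Some(NO) => Err("peer does not have the hash".into()),
        Some(ERR) | None => Err("protocol error".into()),
        Some(other) => Err(format!("unexpected command byte 0x{other:02X}")),
    }
}
```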
371 | 372 | 373 | ### Data Replication in Group 374 | 375 | To download a Hash from the group, we use the following algorithm (a condensed sketch appears at the end of this section): 376 | 377 | 378 | 379 | * List repos in the group 380 | * Get their route IDs 381 | * Shuffle the list of routes for load balancing 382 | * For each route: 383 | * Verify route ID blob 384 | * Send ASK command with hash 385 | * If successful: 386 | * Receive and verify data chunks 387 | * Store verified data 388 | * End replication 389 | * On failure: 390 | * Log error 391 | * Try next peer 392 | * If all peers failed, return an error to the application 393 | 394 | Due to the high latency of Veilid tunnels, it's faster to ask random peers for data than it is to get the list of peers that have data and select from them.
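This loop is implemented by `Group::download_hash_from_peers` in `src/group.rs`, which appears in full later in this document. A condensed sketch follows (error logging trimmed); note that, as the comment in the real code explains, it simply attempts the download from each peer in turn rather than sending a separate ASK first.

```rust
// Condensed from Group::download_hash_from_peers in src/group.rs (shown in
// full later in this document); error logging is omitted for brevity.
impl Group {
    pub async fn download_hash_from_peers(&self, hash: &Hash) -> Result<()> {
        // Try peers in random order so no single peer has to be online.
        let mut repos = self.list_peer_repos().await;
        repos.shuffle(&mut thread_rng());

        if repos.is_empty() {
            return Err(anyhow!("Cannot download hash. No other peers found"));
        }

        for repo in repos.iter() {
            // Each repo advertises its current route ID blob via the DHT.
            if let Ok(route_id_blob) = repo.get_route_id_blob().await {
                // It's faster to try and fail than to ask first and then try.
                if self
                    .iroh_blobs
                    .download_file_from(route_id_blob, hash)
                    .await
                    .is_ok()
                {
                    return Ok(());
                }
            }
        }

        Err(anyhow!("Unable to download from any peer"))
    }
}
```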
395 | 396 | 397 | ## 4. Trust Model 398 | 399 | 400 | ### Overview 401 | 402 | The **trust model** in `save-dweb-backend` is designed to ensure that **only authorized peers can interact with the system**, while protecting against **malicious actors**. 403 | 404 | 405 | ### Core Trust Assumptions 406 | 407 | 408 | 409 | * Group-Based Trust 410 | * A **peer is trusted** only if they possess the group’s **shared secret**. ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/src/group.rs#L206-L207)) 411 | * Trust is **not assumed by default**—new peers must be **invited to the group.** 412 | * End-to-End Data Integrity 413 | * Every piece of data is cryptographically **signed or encrypted**, ensuring that it has **not been modified** in transit. 414 | * Decentralized Trust Model 415 | * No **central authority** exists—peers **self-organize** and invite each other. 416 | 417 | 418 | ### Security Best Practices 419 | 420 | 421 | 422 | * **Regular Dependency Updates**: Keep all dependencies updated to mitigate vulnerabilities. 423 | * Use a tool like `cargo audit` to check for vulnerabilities in your dependencies. 424 | 425 | 426 | ## 5. Implementation 427 | 428 | 429 | ## 5.1 Technology Stack 430 | 431 | The Save DWeb Backend is built using the Rust programming language, leveraging its performance and safety features. The key components of the technology stack include: 432 | 433 | 434 | 435 | * **Rust**: The primary programming language used for developing the backend. 436 | * **Cargo**: The Rust package manager and build system, which manages dependencies and project configuration. 437 | * **Tokio**: An asynchronous runtime for Rust, enabling concurrent programming and efficient I/O operations. 438 | * **Serde**: A framework for serializing and deserializing Rust data structures, facilitating data interchange formats like JSON and CBOR. 439 | * **Iroh**: A library for blob replication and data storage, with content hashes used to verify data. 440 | * **Veilid**: A protocol for secure peer discovery and connections, providing encrypted and anonymous communication. ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/Cargo.toml)) 441 | 442 | 443 | ## 6. Development Guidelines 444 | 445 | 446 | ## 6.1 Project Setup 447 | 448 | 449 | ### Dependencies 450 | 451 | 452 | 453 | * Rust toolchain 454 | * Cargo package manager 455 | * Development dependencies listed in Cargo.toml ([Source](https://github.com/openarchive/save-dweb-backend/blob/main/Cargo.toml)) 456 | 457 | 458 | ### Building and Testing 459 | 460 | 461 | 462 | * Run tests with `cargo test` 463 | * Format code with `cargo fmt` 464 | * Lint with `cargo clippy` 465 | * Run the backend with `cargo run` 466 | 467 | [Source](https://github.com/openarchive/save-dweb-backend/blob/main/README.md#running) 468 | 469 | 470 | ## 6.2 Code Organization 471 | 472 | The codebase is organized into several key modules: 473 | 474 | 475 | 476 | * `backend`: Core backend functionality and initialization 477 | * `common`: Shared utilities and traits 478 | * `group`: Group management and operations 479 | * `repo`: Repository handling 480 | * `rpc`: RPC service implementation 481 | 482 | 483 | ## 6.3 Contributing Guidelines 484 | 485 | 486 | ### Code Style 487 | 488 | 489 | 490 | * Follow Rust standard formatting guidelines 491 | * Use meaningful variable and function names 492 | * Document public APIs with rustdoc comments 493 | 494 | 495 | ### Testing 496 | 497 | 498 | 499 | * Write unit tests for new functionality 500 | * Ensure existing tests pass 501 | * Add integration tests for new features 502 | 503 | 504 | ### Security Considerations 505 | 506 | 507 | 508 | * Review cryptographic implementations 509 | * Validate input data 510 | * Handle errors appropriately -------------------------------------------------------------------------------- /graphviz_architecture.dot: -------------------------------------------------------------------------------- 1 | digraph { 2 | rankdir=TB 3 | sync[label="Backups Server\n(Cloud/PC)" shape=house]; 4 | android[label="Android Kotlin" shape=Msquare]; 5 | ios[label="iOS Swift" shape=Msquare]; 6 | daemon[label="DWeb Backend Daemon"]; 7 | syncGroup[label="P2P Sync Group\n(Gossip via app calls)"]; 8 | peer[label="Other peers" shape=Msquare] 9 | admin[label="Sync Admin\n(admin key pair)" shape=Msquare] 10 | repo[label="Group Data\n(Veilid DHT Record)" shape=cylinder] 11 | external[label="Others Data\n(Veilid DHT Record)" shape=folder] 12 | personal[label="Personal Data Repo\n(Veilid DHT Record)" shape=folder] 13 | vrpc[label="Veilid app calls"] 14 | rpc[label="Inter-process RPC/FFI"] 15 | daemon -> syncGroup; 16 | android -> rpc; 17 | ios -> rpc; 18 | rpc -> daemon[label="Unix domain socket or FFI"]; 19 | peer -> syncGroup [label="Add archives"]; 20 | syncGroup -> peer[label="View, Replicate"]; 21 | sync -> syncGroup; 22 | syncGroup -> sync; 23 | admin -> vrpc[label="View/Remove Groups"]; 24 | { 25 | rank=same; 26 | daemon -> sync[style=dashed label="Code reuse"] 27 | } 28 | repo -> external; 29 | repo -> personal; 30 | daemon -> repo; 31 | ios -> vrpc; 32 | android -> vrpc; 33 | vrpc -> sync[label="Veilid Route"]; 34 | { 35 | rank=same; 36 | external -> peer[style=dashed]; 37 | } 38 | } -------------------------------------------------------------------------------- /src/backend.rs: -------------------------------------------------------------------------------- 1 | use crate::backend; 2 | use crate::common::{init_veilid, make_route, CommonKeypair, DHTEntity}; 3 | use crate::constants::KNOWN_GROUP_LIST; 4 | use crate::group::{self, Group, URL_DHT_KEY, URL_ENCRYPTION_KEY, URL_PUBLIC_KEY, URL_SECRET_KEY}; 5 | use crate::repo::Repo; 6 | use anyhow::{anyhow, Result}; 7 | use
clap::builder::Str; 8 | use iroh::node::Node; 9 | use iroh_blobs::format::collection::Collection; 10 | use iroh_blobs::util::SetTagOption; 11 | use iroh_blobs::Hash; 12 | use serde::{Deserialize, Serialize}; 13 | use std::collections::HashMap; 14 | use std::mem; 15 | use std::ops::Deref; 16 | use std::path::{Path, PathBuf}; 17 | use std::sync::Arc; 18 | use tokio::fs; 19 | use tokio::sync::Mutex; 20 | use tokio::sync::{ 21 | broadcast, 22 | mpsc::{self, Receiver}, 23 | oneshot, 24 | }; 25 | use tracing::info; 26 | use url::Url; 27 | use veilid_core::{ 28 | api_startup_config, vld0_generate_keypair, CryptoKey, CryptoSystem, CryptoSystemVLD0, 29 | CryptoTyped, DHTSchema, KeyPair, ProtectedStore, RoutingContext, SharedSecret, TypedKey, 30 | UpdateCallback, VeilidAPI, VeilidConfigInner, VeilidConfigProtectedStore, VeilidUpdate, 31 | CRYPTO_KEY_LENGTH, CRYPTO_KIND_VLD0, 32 | }; 33 | use veilid_iroh_blobs::iroh::VeilidIrohBlobs; 34 | use veilid_iroh_blobs::tunnels::{OnNewRouteCallback, OnRouteDisconnectedCallback}; 35 | use xdg::BaseDirectories; 36 | 37 | #[derive(Serialize, Deserialize, Debug)] 38 | pub struct KnownGroupList { 39 | groups: Vec, 40 | } 41 | 42 | pub struct BackendInner { 43 | path: PathBuf, 44 | veilid_api: Option, 45 | update_rx: Option>, 46 | groups: HashMap>, 47 | pub iroh_blobs: Option, 48 | on_new_route_callback: Option, 49 | } 50 | 51 | impl BackendInner { 52 | async fn save_known_group_ids(&self) -> Result<()> { 53 | let groups = self.groups.clone().into_keys().collect(); 54 | 55 | let info = KnownGroupList { groups }; 56 | 57 | println!("Saving group IDs {:?}", info); 58 | let data = 59 | serde_cbor::to_vec(&info).map_err(|e| anyhow!("Failed to serialize keypair: {}", e))?; 60 | self.veilid()? 61 | .protected_store()? 62 | .save_user_secret(KNOWN_GROUP_LIST, &data) 63 | .map_err(|e| anyhow!("Unable to store known group IDs: {}", e))?; 64 | Ok(()) 65 | } 66 | 67 | fn veilid(&self) -> Result { 68 | Ok(self 69 | .veilid_api 70 | .as_ref() 71 | .ok_or_else(|| anyhow!("Veilid API not initialized"))? 72 | .clone()) 73 | } 74 | 75 | fn iroh_blobs(&self) -> Result { 76 | Ok(self 77 | .iroh_blobs 78 | .as_ref() 79 | .ok_or_else(|| anyhow!("Veilid Iroh Blobs API not initialized"))? 
80 | .clone()) 81 | } 82 | } 83 | 84 | #[derive(Clone)] 85 | pub struct Backend { 86 | inner: Arc>, 87 | } 88 | 89 | impl Backend { 90 | pub fn new(base_path: &Path) -> Result { 91 | let inner = BackendInner { 92 | path: base_path.to_path_buf(), 93 | veilid_api: None, 94 | update_rx: None, 95 | groups: HashMap::new(), 96 | iroh_blobs: None, 97 | on_new_route_callback: None, 98 | }; 99 | 100 | let backend = Backend { 101 | inner: Arc::new(Mutex::new(inner)), 102 | }; 103 | 104 | Ok(backend) 105 | } 106 | 107 | pub async fn from_dependencies( 108 | base_path: &Path, 109 | veilid_api: VeilidAPI, 110 | update_rx: broadcast::Receiver, 111 | store: iroh_blobs::store::fs::Store, 112 | ) -> Result { 113 | let inner = BackendInner { 114 | path: base_path.to_path_buf(), 115 | veilid_api: Some(veilid_api.clone()), 116 | update_rx: Some(update_rx), 117 | groups: HashMap::new(), 118 | iroh_blobs: None, 119 | on_new_route_callback: None, 120 | }; 121 | 122 | let backend = Backend { 123 | inner: Arc::new(Mutex::new(inner)), 124 | }; 125 | 126 | let inner_clone = backend.inner.clone(); 127 | 128 | let on_new_route_callback: OnNewRouteCallback = Arc::new(move |route_id, route_id_blob| { 129 | let inner = inner_clone.clone(); 130 | println!("Re-generating route"); 131 | tokio::spawn(async move { 132 | let inner = inner.lock().await; 133 | 134 | if let Some(on_new_route) = &inner.on_new_route_callback { 135 | on_new_route(route_id, route_id_blob) 136 | } 137 | 138 | for group in inner.groups.clone().into_values() { 139 | if let Some(repo) = group.get_own_repo().await { 140 | if let Err(err) = repo.update_route_on_dht().await { 141 | eprintln!( 142 | "Unable to update route after rebuild in group {} in repo {}: {}", 143 | group.id(), 144 | repo.id(), 145 | err 146 | ); 147 | } 148 | } 149 | } 150 | }); 151 | }); 152 | 153 | let on_disconnected_callback: OnRouteDisconnectedCallback = Arc::new(move || { 154 | println!("Route died"); 155 | }); 156 | 157 | let (route_id, route_id_blob) = make_route(&veilid_api).await?; 158 | let routing_context = veilid_api.routing_context()?; 159 | 160 | let mut inner = backend.inner.lock().await; 161 | 162 | // Initialize iroh_blobs 163 | inner.iroh_blobs = Some(VeilidIrohBlobs::new( 164 | veilid_api.clone(), 165 | routing_context, 166 | route_id_blob, 167 | route_id, 168 | inner.update_rx.as_ref().unwrap().resubscribe(), 169 | store, 170 | Some(on_disconnected_callback), // TODO: Notify application of route closure? 
171 | Some(on_new_route_callback), 172 | )); 173 | 174 | drop(inner); 175 | 176 | Ok(backend) 177 | } 178 | 179 | pub async fn start(&self) -> Result<()> { 180 | let mut inner = self.inner.lock().await; 181 | 182 | if inner.veilid_api.is_some() { 183 | return Err(anyhow!("Veilid already initialized")); 184 | } 185 | println!("Starting on {}", inner.path.display()); 186 | 187 | let base_dir = inner.path.clone(); 188 | fs::create_dir_all(&base_dir).await?; 189 | 190 | let (veilid_api, mut update_rx) = init_veilid(&base_dir, "openarchive".to_string()).await?; 191 | 192 | inner.veilid_api = Some(veilid_api.clone()); 193 | inner.update_rx = Some(update_rx.resubscribe()); 194 | 195 | // Initialize iroh_blobs store 196 | let store = iroh_blobs::store::fs::Store::load(base_dir.join("iroh")).await?; 197 | 198 | // Create route_id and route_id_blob 199 | let (route_id, route_id_blob) = make_route(&veilid_api).await?; 200 | 201 | // Get veilid_api and routing_context 202 | let routing_context = veilid_api.routing_context()?; 203 | 204 | let inner_clone = self.inner.clone(); 205 | 206 | let on_new_route_callback: OnNewRouteCallback = Arc::new(move |route_id, route_id_blob| { 207 | let inner = inner_clone.clone(); 208 | println!("Re-generating route"); 209 | tokio::spawn(async move { 210 | let inner = inner.lock().await; 211 | 212 | if let Some(on_new_route) = &inner.on_new_route_callback { 213 | on_new_route(route_id, route_id_blob) 214 | } 215 | 216 | for group in inner.groups.clone().into_values() { 217 | if let Some(repo) = group.get_own_repo().await { 218 | if let Err(err) = repo.update_route_on_dht().await { 219 | eprintln!( 220 | "Unable to update route after rebuild in group {} in repo {}: {}", 221 | group.id(), 222 | repo.id(), 223 | err 224 | ); 225 | } 226 | } 227 | } 228 | }); 229 | }); 230 | 231 | // Initialize iroh_blobs 232 | inner.iroh_blobs = Some(VeilidIrohBlobs::new( 233 | veilid_api.clone(), 234 | routing_context, 235 | route_id_blob, 236 | route_id, 237 | update_rx.resubscribe(), 238 | store, 239 | None, // TODO: Notify application of route closure? 
240 | Some(on_new_route_callback), 241 | )); 242 | 243 | drop(inner); 244 | 245 | if let Err(err) = self.load_known_groups().await { 246 | eprintln!("No known groups on start"); 247 | } 248 | 249 | Ok(()) 250 | } 251 | 252 | pub async fn stop(&self) -> Result<()> { 253 | let mut inner = self.inner.lock().await; 254 | println!("Stopping Backend..."); 255 | if let Some(iroh_blobs) = inner.iroh_blobs.take() { 256 | println!("Shutting down Veilid Iroh Blobs"); 257 | iroh_blobs.shutdown().await?; 258 | println!("Veilid Iroh Blobs shut down successfully"); 259 | } 260 | if inner.veilid_api.is_some() { 261 | println!("Shutting down Veilid API"); 262 | let veilid = inner.veilid_api.take(); 263 | veilid.unwrap().shutdown().await; 264 | println!("Veilid API shut down successfully"); 265 | inner.groups = HashMap::new(); 266 | } 267 | Ok(()) 268 | } 269 | 270 | pub async fn set_on_new_route_callback( 271 | &self, 272 | on_new_route_connected_callback: OnNewRouteCallback, 273 | ) { 274 | let mut inner = self.inner.lock().await; 275 | inner.on_new_route_callback = Some(on_new_route_connected_callback); 276 | } 277 | 278 | pub async fn join_from_url(&self, url_string: &str) -> Result> { 279 | let keys = parse_url(url_string)?; 280 | self.join_group(keys).await 281 | } 282 | 283 | pub async fn get_route_id_blob(&self) -> Result> { 284 | if let Some(blobs) = self.get_iroh_blobs().await { 285 | Ok(blobs.route_id_blob().await) 286 | } else { 287 | Err(anyhow!("Veilid not initialized")) 288 | } 289 | } 290 | 291 | pub async fn join_group(&self, keys: CommonKeypair) -> Result> { 292 | let mut inner = self.inner.lock().await; 293 | 294 | let iroh_blobs = inner.iroh_blobs()?; 295 | let veilid = inner.veilid()?; 296 | 297 | let routing_context = veilid.routing_context()?; 298 | let crypto_system = veilid 299 | .crypto()? 
300 | .get(CRYPTO_KIND_VLD0) 301 | .ok_or_else(|| anyhow!("Unable to init crypto system")); 302 | 303 | let record_key = TypedKey::new(CRYPTO_KIND_VLD0, keys.id); 304 | // First open the DHT record 305 | let dht_record = routing_context 306 | .open_dht_record(record_key.clone(), None) // Don't pass a writer here yet 307 | .await?; 308 | 309 | // Use the owner key from the DHT record as the default writer 310 | let owner_key = dht_record.owner(); // Call the owner() method to get the owner key 311 | 312 | // Reopen the DHT record with the owner key as the writer 313 | let dht_record = routing_context 314 | .open_dht_record( 315 | record_key.clone(), 316 | Some(KeyPair::new( 317 | owner_key.clone(), 318 | keys.secret_key.clone().unwrap(), 319 | )), 320 | ) 321 | .await?; 322 | 323 | let mut group = Group::new( 324 | dht_record.clone(), 325 | keys.encryption_key.clone(), 326 | routing_context, 327 | veilid.clone(), 328 | iroh_blobs.clone(), 329 | ); 330 | 331 | group.try_load_repo_from_disk().await; 332 | group.load_repos_from_dht().await?; 333 | 334 | inner.groups.insert(group.id(), Box::new(group.clone())); 335 | 336 | inner.save_known_group_ids().await?; 337 | 338 | Ok(Box::new(group)) 339 | } 340 | 341 | pub async fn create_group(&self) -> Result { 342 | let mut inner = self.inner.lock().await; 343 | let iroh_blobs = inner.iroh_blobs()?; 344 | let veilid = inner.veilid()?; 345 | 346 | let routing_context = veilid.routing_context()?; 347 | let crypto = veilid.crypto()?; 348 | let crypto_system = crypto 349 | .get(CRYPTO_KIND_VLD0) 350 | .ok_or_else(|| anyhow!("Unable to init crypto system"))?; 351 | 352 | let schema = DHTSchema::dflt(65)?; // 64 members + a title 353 | let kind = Some(CRYPTO_KIND_VLD0); 354 | let owner_keypair = crypto_system.generate_keypair(); 355 | 356 | let dht_record = routing_context 357 | .create_dht_record(schema, Some(owner_keypair), kind) 358 | .await?; 359 | 360 | let encryption_key = crypto_system.random_shared_secret(); 361 | 362 | let group = Group::new( 363 | dht_record.clone(), 364 | encryption_key, 365 | routing_context, 366 | veilid.clone(), 367 | iroh_blobs.clone(), 368 | ); 369 | 370 | let protected_store = veilid.protected_store().unwrap(); 371 | CommonKeypair { 372 | id: group.id(), 373 | public_key: dht_record.owner().clone(), 374 | secret_key: group.get_secret_key(), 375 | encryption_key: group.get_encryption_key(), 376 | } 377 | .store_keypair(&protected_store) 378 | .await 379 | .map_err(|e| anyhow!(e))?; 380 | 381 | inner.groups.insert(group.id(), Box::new(group.clone())); 382 | 383 | inner.save_known_group_ids().await?; 384 | 385 | Ok(group) 386 | } 387 | 388 | pub async fn get_group(&self, record_key: &CryptoKey) -> Result> { 389 | let mut inner = self.inner.lock().await; 390 | if let Some(group) = inner.groups.get(record_key) { 391 | return Ok(group.clone()); 392 | } 393 | let iroh_blobs = inner.iroh_blobs()?; 394 | let veilid = inner.veilid()?; 395 | 396 | let routing_context = veilid.routing_context()?; 397 | let protected_store = veilid.protected_store().unwrap(); 398 | 399 | // Load the keypair associated with the record_key from the protected store 400 | let retrieved_keypair = CommonKeypair::load_keypair(&protected_store, record_key) 401 | .await 402 | .map_err(|_| anyhow!("Failed to load keypair"))?; 403 | 404 | let crypto = veilid.crypto()?; 405 | let crypto_system = crypto 406 | .get(CRYPTO_KIND_VLD0) 407 | .ok_or_else(|| anyhow!("Unable to init crypto system"))?; 408 | 409 | // Use the owner key from the DHT record as the default 
writer 410 | let owner_key = retrieved_keypair.public_key; // Call the owner() method to get the owner key 411 | let owner_secret = retrieved_keypair.secret_key; 412 | let record_key = TypedKey::new(CRYPTO_KIND_VLD0, *record_key); 413 | 414 | let owner = owner_secret.map(|secret| KeyPair::new(owner_key, secret)); 415 | 416 | // Reopen the DHT record with the owner key as the writer 417 | let dht_record = routing_context 418 | .open_dht_record(record_key.clone(), owner) 419 | .await?; 420 | 421 | let mut group = Group::new( 422 | dht_record.clone(), 423 | retrieved_keypair.encryption_key.clone(), 424 | routing_context, 425 | veilid.clone(), 426 | iroh_blobs.clone(), 427 | ); 428 | 429 | group.try_load_repo_from_disk().await; 430 | group.load_repos_from_dht().await?; 431 | 432 | inner.groups.insert(group.id(), Box::new(group.clone())); 433 | 434 | drop(inner); 435 | 436 | Ok(Box::new(group)) 437 | } 438 | 439 | pub async fn list_groups(&self) -> Result>> { 440 | let mut inner = self.inner.lock().await; 441 | Ok(inner.groups.values().cloned().collect()) 442 | } 443 | 444 | pub async fn load_known_groups(&self) -> Result<()> { 445 | for id in self.list_known_group_ids().await?.iter() { 446 | self.get_group(id).await?; 447 | } 448 | Ok(()) 449 | } 450 | 451 | pub async fn list_known_group_ids(&self) -> Result> { 452 | let mut inner = self.inner.lock().await; 453 | let veilid = inner.veilid()?; 454 | let data = veilid 455 | .protected_store()? 456 | .load_user_secret(KNOWN_GROUP_LIST) 457 | .map_err(|_| anyhow!("Failed to load keypair"))? 458 | .ok_or_else(|| anyhow!("Keypair not found"))?; 459 | let info: KnownGroupList = 460 | serde_cbor::from_slice(&data).map_err(|_| anyhow!("Failed to deserialize keypair"))?; 461 | Ok(info.groups) 462 | } 463 | 464 | pub async fn close_group(&self, key: CryptoKey) -> Result<()> { 465 | let mut inner = self.inner.lock().await; 466 | if let Some(group) = inner.groups.remove(&key) { 467 | group.close().await.map_err(|e| anyhow!(e))?; 468 | } else { 469 | return Err(anyhow!("Group not found")); 470 | } 471 | Ok(()) 472 | } 473 | 474 | pub async fn create_collection(&self) -> Result { 475 | // Initialize a new Iroh Node in memory 476 | let node = Node::memory().spawn().await?; 477 | 478 | // Get the Client from the node 479 | let iroh_client = node.client().blobs(); 480 | 481 | // Create an empty Collection 482 | let mut collection = Collection::default(); 483 | 484 | // Tag options for creating the collection 485 | let tag_option = SetTagOption::Auto; 486 | 487 | // No tags to delete, so we pass an empty vector 488 | let tags_to_delete = Vec::new(); 489 | 490 | // Use the iroh_client instance to create the collection and get the root hash 491 | let (root_hash, _tag) = iroh_client 492 | .create_collection(collection, tag_option, tags_to_delete) 493 | .await?; 494 | 495 | // Return the root hash 496 | Ok(root_hash) 497 | } 498 | 499 | pub async fn subscribe_updates(&self) -> Option> { 500 | let mut inner = self.inner.lock().await; 501 | inner.update_rx.as_ref().map(|rx| rx.resubscribe()) 502 | } 503 | 504 | pub async fn get_veilid_api(&self) -> Option { 505 | let mut inner = self.inner.lock().await; 506 | 507 | inner.veilid_api.clone() 508 | } 509 | 510 | pub async fn get_iroh_blobs(&self) -> Option { 511 | let mut inner = self.inner.lock().await; 512 | inner.iroh_blobs.clone() 513 | } 514 | 515 | pub async fn get_routing_context(&self) -> Option { 516 | let veilid_api = self.get_veilid_api().await?; 517 | veilid_api.routing_context().ok() 518 | } 519 | } 520 | 
521 | async fn wait_for_network(update_rx: &mut broadcast::Receiver) -> Result<()> { 522 | while let Ok(update) = update_rx.recv().await { 523 | if let VeilidUpdate::Attachment(attachment_state) = update { 524 | if attachment_state.public_internet_ready { 525 | println!("Public internet ready!"); 526 | break; 527 | } 528 | } 529 | } 530 | Ok(()) 531 | } 532 | 533 | fn find_query(url: &Url, key: &str) -> Result { 534 | for (query_key, value) in url.query_pairs() { 535 | if query_key == key { 536 | return Ok(value.into_owned()); 537 | } 538 | } 539 | 540 | Err(anyhow!("Unable to find parameter {} in URL {:?}", key, url)) 541 | } 542 | 543 | pub fn crypto_key_from_query(url: &Url, key: &str) -> Result { 544 | let value = find_query(url, key)?; 545 | let bytes = hex::decode(value)?; 546 | let mut key_vec: [u8; CRYPTO_KEY_LENGTH] = [0; CRYPTO_KEY_LENGTH]; 547 | key_vec.copy_from_slice(bytes.as_slice()); 548 | 549 | let key = CryptoKey::from(key_vec); 550 | Ok(key) 551 | } 552 | 553 | pub fn parse_url(url_string: &str) -> Result { 554 | let url = Url::parse(url_string)?; 555 | 556 | let id = crypto_key_from_query(&url, URL_DHT_KEY)?; 557 | let encryption_key = crypto_key_from_query(&url, URL_ENCRYPTION_KEY)?; 558 | let public_key = crypto_key_from_query(&url, URL_PUBLIC_KEY)?; 559 | let secret_key = Some(crypto_key_from_query(&url, URL_SECRET_KEY)?); 560 | 561 | Ok(CommonKeypair { 562 | id, 563 | public_key, 564 | secret_key, 565 | encryption_key, 566 | }) 567 | } 568 | -------------------------------------------------------------------------------- /src/common.rs: -------------------------------------------------------------------------------- 1 | #![allow(async_fn_in_trait)] 2 | #![allow(clippy::async_yields_async)] 3 | 4 | use anyhow::{anyhow, Result}; 5 | use serde::{Deserialize, Serialize}; 6 | use std::{path::Path, path::PathBuf, sync::Arc}; 7 | use tokio::sync::broadcast::{self, Receiver}; 8 | use url::Url; 9 | use veilid_core::{ 10 | CryptoKey, CryptoSystem, CryptoSystemVLD0, CryptoTyped, DHTRecordDescriptor, KeyPair, Nonce, 11 | ProtectedStore, RouteId, RoutingContext, Sequencing, SharedSecret, Stability, UpdateCallback, 12 | VeilidAPI, VeilidConfigInner, VeilidUpdate, CRYPTO_KIND_VLD0, VALID_CRYPTO_KINDS, 13 | }; 14 | 15 | use crate::constants::ROUTE_ID_DHT_KEY; 16 | 17 | pub async fn make_route(veilid: &VeilidAPI) -> Result<(RouteId, Vec)> { 18 | let mut retries = 6; 19 | while retries > 0 { 20 | retries -= 1; 21 | let result = veilid 22 | .new_custom_private_route( 23 | &VALID_CRYPTO_KINDS, 24 | Stability::LowLatency, 25 | Sequencing::NoPreference, 26 | ) 27 | .await; 28 | 29 | if let Ok(value) = result { 30 | return Ok(value); 31 | } else if let Err(e) = &result { 32 | eprintln!("Failed to create route: {}", e); 33 | } 34 | } 35 | Err(anyhow!("Unable to create route, reached max retries")) 36 | } 37 | 38 | pub async fn init_veilid( 39 | base_dir: &Path, 40 | namespace: String, 41 | ) -> Result<(VeilidAPI, Receiver)> { 42 | let config_inner = config_for_dir(base_dir.to_path_buf(), namespace); 43 | 44 | let (tx, mut rx) = broadcast::channel(32); 45 | 46 | let update_callback: UpdateCallback = Arc::new(move |update| { 47 | let tx = tx.clone(); 48 | tokio::spawn(async move { 49 | if tx.send(update).is_err() { 50 | // TODO: 51 | //println!("receiver dropped"); 52 | } 53 | }); 54 | }); 55 | 56 | // println!("Init veilid"); 57 | let veilid = veilid_core::api_startup_config(update_callback, config_inner).await?; 58 | 59 | //println!("Attach veilid"); 60 | 61 | veilid.attach().await?; 62 | 
63 | //println!("Wait for veilid network"); 64 | 65 | while let Ok(update) = rx.recv().await { 66 | if let VeilidUpdate::Attachment(attachment_state) = update { 67 | if attachment_state.public_internet_ready && attachment_state.state.is_attached() { 68 | println!("Public internet ready!"); 69 | break; 70 | } 71 | } 72 | } 73 | 74 | Ok((veilid, rx)) 75 | } 76 | 77 | pub fn config_for_dir(base_dir: PathBuf, namespace: String) -> VeilidConfigInner { 78 | VeilidConfigInner { 79 | program_name: "save-dweb-backend".to_string(), 80 | namespace, 81 | protected_store: veilid_core::VeilidConfigProtectedStore { 82 | // avoid prompting for password, don't do this in production 83 | always_use_insecure_storage: true, 84 | directory: base_dir 85 | .join("protected_store") 86 | .to_string_lossy() 87 | .to_string(), 88 | ..Default::default() 89 | }, 90 | table_store: veilid_core::VeilidConfigTableStore { 91 | directory: base_dir.join("table_store").to_string_lossy().to_string(), 92 | ..Default::default() 93 | }, 94 | block_store: veilid_core::VeilidConfigBlockStore { 95 | directory: base_dir.join("block_store").to_string_lossy().to_string(), 96 | ..Default::default() 97 | }, 98 | ..Default::default() 99 | } 100 | } 101 | 102 | #[derive(Serialize, Deserialize, Clone)] 103 | pub struct CommonKeypair { 104 | pub id: CryptoKey, 105 | pub public_key: CryptoKey, 106 | pub secret_key: Option, 107 | pub encryption_key: SharedSecret, 108 | } 109 | 110 | impl CommonKeypair { 111 | pub async fn store_keypair(&self, protected_store: &ProtectedStore) -> Result<()> { 112 | let keypair_data = 113 | serde_cbor::to_vec(&self).map_err(|e| anyhow!("Failed to serialize keypair: {}", e))?; 114 | protected_store 115 | .save_user_secret(self.id.to_string(), &keypair_data) 116 | .map_err(|e| anyhow!("Unable to store keypair: {}", e))?; 117 | Ok(()) 118 | } 119 | 120 | pub async fn load_keypair(protected_store: &ProtectedStore, id: &CryptoKey) -> Result { 121 | let keypair_data = protected_store 122 | .load_user_secret(id.to_string()) 123 | .map_err(|_| anyhow!("Failed to load keypair"))? 
124 | .ok_or_else(|| anyhow!("Keypair not found"))?; 125 | let retrieved_keypair: CommonKeypair = serde_cbor::from_slice(&keypair_data) 126 | .map_err(|_| anyhow!("Failed to deserialize keypair"))?; 127 | Ok(retrieved_keypair) 128 | } 129 | } 130 | 131 | pub trait DHTEntity { 132 | fn get_id(&self) -> CryptoKey; 133 | fn get_encryption_key(&self) -> SharedSecret; 134 | fn get_routing_context(&self) -> RoutingContext; 135 | fn get_veilid_api(&self) -> VeilidAPI; 136 | fn get_dht_record(&self) -> DHTRecordDescriptor; 137 | fn get_secret_key(&self) -> Option; 138 | 139 | // Default method to get the owner key 140 | fn owner_key(&self) -> CryptoKey { 141 | self.get_dht_record().owner().clone() 142 | } 143 | 144 | // Default method to get the owner secret 145 | fn owner_secret(&self) -> Option { 146 | self.get_dht_record().owner_secret().cloned() 147 | } 148 | 149 | fn encrypt_aead(&self, data: &[u8], associated_data: Option<&[u8]>) -> Result> { 150 | let veilid = self.get_veilid_api(); 151 | let crypto = veilid.crypto()?; 152 | let crypto_system = crypto 153 | .get(CRYPTO_KIND_VLD0) 154 | .ok_or_else(|| anyhow!("Unable to init crypto system"))?; 155 | let nonce = crypto_system.random_nonce(); 156 | let mut buffer = Vec::with_capacity(nonce.as_slice().len() + data.len()); 157 | buffer.extend_from_slice(nonce.as_slice()); 158 | let encrypted_chunk = crypto_system 159 | .encrypt_aead(data, &nonce, &self.get_encryption_key(), associated_data) 160 | .map_err(|e| anyhow!("Failed to encrypt data: {}", e))?; 161 | buffer.extend_from_slice(&encrypted_chunk); 162 | Ok(buffer) 163 | } 164 | 165 | fn decrypt_aead(&self, data: &[u8], associated_data: Option<&[u8]>) -> Result> { 166 | let veilid = self.get_veilid_api(); 167 | let crypto = veilid.crypto()?; 168 | let crypto_system = crypto 169 | .get(CRYPTO_KIND_VLD0) 170 | .ok_or_else(|| anyhow!("Unable to init crypto system"))?; 171 | 172 | let nonce: [u8; 24] = data[..24] 173 | .try_into() 174 | .map_err(|_| anyhow!("Failed to convert nonce slice to array"))?; 175 | let nonce = Nonce::new(nonce); 176 | let encrypted_data = &data[24..]; 177 | crypto_system 178 | .decrypt_aead( 179 | encrypted_data, 180 | &nonce, 181 | &self.get_encryption_key(), 182 | associated_data, 183 | ) 184 | .map_err(|e| anyhow!("Failed to decrypt data: {}", e)) 185 | } 186 | 187 | async fn set_name(&self, name: &str) -> Result<()> { 188 | let routing_context = self.get_routing_context(); 189 | let key = self.get_dht_record().key().clone(); 190 | let encrypted_name = self.encrypt_aead(name.as_bytes(), None)?; 191 | routing_context 192 | .set_dht_value(key, 0, encrypted_name, None) 193 | .await?; 194 | Ok(()) 195 | } 196 | 197 | async fn get_name(&self) -> Result { 198 | let routing_context = self.get_routing_context(); 199 | let key = self.get_dht_record().key().clone(); 200 | let value = routing_context.get_dht_value(key, 0, false).await?; 201 | match value { 202 | Some(value) => { 203 | let decrypted_name = self.decrypt_aead(value.data(), None)?; 204 | Ok(String::from_utf8(decrypted_name) 205 | .map_err(|e| anyhow!("Failed to convert DHT value to string: {}", e))?) 
206 | } 207 | None => Err(anyhow!("Value not found")), 208 | } 209 | } 210 | 211 | async fn close(&self) -> Result<()> { 212 | let routing_context = self.get_routing_context(); 213 | let key = self.get_dht_record().key().clone(); 214 | routing_context.close_dht_record(key).await?; 215 | Ok(()) 216 | } 217 | 218 | async fn store_route_id_in_dht(&self, route_id_blob: Vec) -> Result<()> { 219 | let routing_context = &self.get_routing_context(); 220 | let dht_record = self.get_dht_record(); 221 | routing_context 222 | .set_dht_value( 223 | dht_record.key().clone(), 224 | ROUTE_ID_DHT_KEY, 225 | route_id_blob, 226 | None, 227 | ) 228 | .await 229 | .map_err(|e| anyhow!("Failed to store route ID blob in DHT: {}", e))?; 230 | 231 | Ok(()) 232 | } 233 | 234 | async fn get_route_id_from_dht(&self, subkey: u32) -> Result> { 235 | let routing_context = &self.get_routing_context(); 236 | 237 | // Use the existing DHT record 238 | let dht_record = self.get_dht_record(); 239 | 240 | // Get the stored route ID blob at subkey 241 | let stored_blob = routing_context 242 | .get_dht_value(dht_record.key().clone(), ROUTE_ID_DHT_KEY, false) 243 | .await? 244 | .ok_or_else(|| anyhow!("Route ID blob not found in DHT"))?; 245 | 246 | Ok(stored_blob.data().to_vec()) 247 | } 248 | 249 | // Send an AppMessage to the repo owner using the stored route ID blob 250 | async fn send_message_to_owner( 251 | &self, 252 | veilid: &VeilidAPI, 253 | message: Vec, 254 | subkey: u32, 255 | ) -> Result<()> { 256 | let routing_context = self.get_routing_context(); 257 | 258 | // Retrieve the route ID blob from DHT 259 | let route_id_blob = self.get_route_id_from_dht(subkey).await?; 260 | 261 | // Import the route using the blob via VeilidAPI 262 | let route_id = match veilid.import_remote_private_route(route_id_blob) { 263 | Ok(route) => route, 264 | Err(e) => { 265 | eprintln!("Failed to import remote private route: {:?}", e); 266 | return Err(e.into()); 267 | } 268 | }; 269 | 270 | // Send an AppMessage to the repo owner using the imported route ID 271 | if let Err(e) = routing_context 272 | .app_message(veilid_core::Target::PrivateRoute(route_id), message) 273 | .await 274 | { 275 | eprintln!("Failed to send message: {:?}", e); 276 | return Err(e.into()); 277 | } 278 | 279 | Ok(()) 280 | } 281 | 282 | fn get_write_key(&self) -> Option { 283 | unimplemented!("WIP") 284 | } 285 | 286 | async fn members(&self) -> Result> { 287 | unimplemented!("WIP") 288 | } 289 | 290 | async fn join(&self) -> Result<()> { 291 | unimplemented!("WIP") 292 | } 293 | 294 | async fn leave(&self) -> Result<()> { 295 | unimplemented!("WIP") 296 | } 297 | } 298 | -------------------------------------------------------------------------------- /src/constants.rs: -------------------------------------------------------------------------------- 1 | pub const GROUP_NOT_FOUND: &str = "Group not found"; 2 | pub const UNABLE_TO_SET_GROUP_NAME: &str = "Unable to set group name"; 3 | pub const UNABLE_TO_GET_GROUP_NAME: &str = "Unable to get group name"; 4 | pub const TEST_GROUP_NAME: &str = "Test Group"; 5 | pub const UNABLE_TO_STORE_KEYPAIR: &str = "Unable to store keypair"; 6 | pub const FAILED_TO_LOAD_KEYPAIR: &str = "Failed to load keypair"; 7 | pub const KEYPAIR_NOT_FOUND: &str = "Keypair not found"; 8 | pub const FAILED_TO_DESERIALIZE_KEYPAIR: &str = "Failed to deserialize keypair"; 9 | pub const ROUTE_ID_DHT_KEY: u32 = 2; 10 | 11 | pub const KNOWN_GROUP_LIST: &str = "SAVE_BACKEND_KNOWN_GROUPS"; 12 | 
-------------------------------------------------------------------------------- /src/group.rs: -------------------------------------------------------------------------------- 1 | use crate::common::CommonKeypair; 2 | use crate::repo::Repo; 3 | use crate::{common::DHTEntity, repo}; 4 | use anyhow::{anyhow, Error, Result}; 5 | use bytes::Bytes; 6 | use hex::ToHex; 7 | use iroh::net::key::SecretKey; 8 | use iroh_blobs::Hash; 9 | use rand::seq::SliceRandom; 10 | use rand::thread_rng; 11 | use serde::{Deserialize, Serialize}; 12 | use std::any::Any; 13 | use std::collections::HashMap; 14 | use std::future::Future; 15 | use std::time::{SystemTime, UNIX_EPOCH}; 16 | 17 | use std::path::PathBuf; 18 | use std::result; 19 | use std::sync::Arc; 20 | use tokio::sync::{mpsc, Mutex}; 21 | use url::Url; 22 | use veilid_core::{ 23 | CryptoKey, CryptoSystemVLD0, CryptoTyped, DHTRecordDescriptor, DHTReportScope, DHTSchema, 24 | KeyPair, ProtectedStore, RoutingContext, SharedSecret, TypedKey, ValueSubkeyRangeSet, 25 | VeilidAPI, VeilidUpdate, CRYPTO_KEY_LENGTH, CRYPTO_KIND_VLD0, 26 | }; 27 | use veilid_iroh_blobs::iroh::VeilidIrohBlobs; 28 | 29 | pub const PROTOCOL_SCHEME: &str = "save+dweb:"; 30 | pub const URL_DHT_KEY: &str = "dht"; 31 | pub const URL_ENCRYPTION_KEY: &str = "enc"; 32 | pub const URL_PUBLIC_KEY: &str = "pk"; 33 | pub const URL_SECRET_KEY: &str = "sk"; 34 | 35 | #[derive(Clone)] 36 | pub struct Group { 37 | pub dht_record: DHTRecordDescriptor, 38 | pub encryption_key: SharedSecret, 39 | pub routing_context: RoutingContext, 40 | pub repos: Arc>>, 41 | pub veilid: VeilidAPI, 42 | pub iroh_blobs: VeilidIrohBlobs, 43 | } 44 | 45 | impl Group { 46 | pub fn new( 47 | dht_record: DHTRecordDescriptor, 48 | encryption_key: SharedSecret, 49 | routing_context: RoutingContext, 50 | veilid: VeilidAPI, 51 | iroh_blobs: VeilidIrohBlobs, 52 | ) -> Self { 53 | Self { 54 | dht_record, 55 | encryption_key, 56 | routing_context, 57 | repos: Arc::new(Mutex::new(HashMap::new())), 58 | veilid, 59 | iroh_blobs, 60 | } 61 | } 62 | 63 | pub fn id(&self) -> CryptoKey { 64 | self.dht_record.key().value.clone() 65 | } 66 | 67 | pub fn owner_key(&self) -> CryptoKey { 68 | self.dht_record.owner().clone() 69 | } 70 | 71 | pub fn owner_secret(&self) -> Option { 72 | self.dht_record.owner_secret().cloned() 73 | } 74 | 75 | async fn add_repo(&mut self, repo: Repo) -> Result<()> { 76 | let id = repo.id(); 77 | self.repos.lock().await.insert(id, repo); 78 | Ok(()) 79 | } 80 | 81 | pub async fn get_repo(&self, id: &CryptoKey) -> Result { 82 | self.repos 83 | .lock() 84 | .await 85 | .get(id) 86 | .ok_or_else(|| anyhow!("Repo not loaded")) 87 | .cloned() 88 | } 89 | 90 | pub async fn has_repo(&self, id: &CryptoKey) -> bool { 91 | self.repos.lock().await.contains_key(id) 92 | } 93 | 94 | pub async fn list_repos(&self) -> Vec { 95 | self.repos 96 | .lock() 97 | .await 98 | .values() 99 | .map(|repo| repo.get_id()) 100 | .collect() 101 | } 102 | 103 | pub async fn get_own_repo(&self) -> Option { 104 | self.repos 105 | .lock() 106 | .await 107 | .values() 108 | .find(|repo| repo.can_write()) 109 | .cloned() 110 | } 111 | 112 | pub async fn list_peer_repos(&self) -> Vec { 113 | self.repos 114 | .lock() 115 | .await 116 | .values() 117 | .filter(|repo| !repo.can_write()) 118 | .cloned() 119 | .collect() 120 | } 121 | 122 | pub async fn download_hash_from_peers(&self, hash: &Hash) -> Result<()> { 123 | // Ask peers to download in random order 124 | let mut rng = thread_rng(); 125 | let mut repos = self.list_peer_repos().await; 126 | 
repos.shuffle(&mut rng); 127 | 128 | if repos.is_empty() { 129 | return Err(anyhow!("Cannot download hash. No other peers found")); 130 | } 131 | 132 | for repo in repos.iter() { 133 | if let Ok(route_id_blob) = repo.get_route_id_blob().await { 134 | // It's faster to try and fail, than to ask then try 135 | let result = self 136 | .iroh_blobs 137 | .download_file_from(route_id_blob, hash) 138 | .await; 139 | if result.is_ok() { 140 | return Ok(()); 141 | } else { 142 | eprintln!("Unable to download from peer, {}", result.unwrap_err()); 143 | } 144 | } 145 | } 146 | 147 | Err(anyhow!("Unable to download from any peer")) 148 | } 149 | 150 | pub async fn peers_have_hash(&self, hash: &Hash) -> Result { 151 | for repo in self.list_peer_repos().await.iter() { 152 | if let Ok(route_id_blob) = repo.get_route_id_blob().await { 153 | println!("Asking {} from {} via {:?}", hash, repo.id(), route_id_blob); 154 | if let Ok(has) = self.iroh_blobs.ask_hash(route_id_blob, *hash).await { 155 | if has { 156 | return Ok(true); 157 | } 158 | } 159 | } 160 | } 161 | 162 | Ok(false) 163 | } 164 | 165 | pub async fn has_hash(&self, hash: &Hash) -> Result { 166 | let has = self.iroh_blobs.has_hash(hash).await; 167 | 168 | Ok(has) 169 | } 170 | 171 | pub async fn get_stream_from_hash( 172 | &self, 173 | hash: &Hash, 174 | ) -> Result>> { 175 | if !self.has_hash(hash).await? { 176 | self.download_hash_from_peers(hash).await? 177 | } 178 | 179 | let receiver = self.iroh_blobs.read_file(*hash).await.unwrap(); 180 | 181 | Ok(receiver) 182 | } 183 | 184 | pub async fn get_repo_name(&self, repo_key: CryptoKey) -> Result { 185 | if let Some(repo) = self.repos.lock().await.get(&repo_key) { 186 | repo.get_name().await 187 | } else { 188 | Err(anyhow!("Repo not found")) 189 | } 190 | } 191 | 192 | pub fn get_url(&self) -> String { 193 | let mut url = Url::parse(format!("{0}:?", PROTOCOL_SCHEME).as_str()).unwrap(); 194 | 195 | url.query_pairs_mut() 196 | .append_pair(URL_DHT_KEY, self.id().encode_hex::().as_str()) 197 | .append_pair( 198 | URL_ENCRYPTION_KEY, 199 | self.get_encryption_key().encode_hex::().as_str(), 200 | ) 201 | .append_pair( 202 | URL_PUBLIC_KEY, 203 | self.owner_key().encode_hex::().as_str(), 204 | ) 205 | .append_pair( 206 | URL_SECRET_KEY, 207 | self.owner_secret().unwrap().encode_hex::().as_str(), 208 | ); 209 | 210 | url.to_string() 211 | } 212 | 213 | async fn dht_repo_count(&self) -> Result { 214 | let dht_record = &self.dht_record; 215 | let range = ValueSubkeyRangeSet::full(); 216 | let scope = DHTReportScope::UpdateGet; 217 | 218 | let record_key = dht_record.key().clone(); 219 | 220 | let report = self 221 | .routing_context 222 | .inspect_dht_record(record_key, range, scope) 223 | .await?; 224 | 225 | let size = report.network_seqs().len(); 226 | 227 | let mut count = 0; 228 | 229 | while count < (size - 1) { 230 | let value = self 231 | .routing_context 232 | .get_dht_value(record_key, (count + 1).try_into()?, true) 233 | .await?; 234 | if value.is_some() { 235 | count += 1; 236 | } else { 237 | return Ok(count); 238 | } 239 | } 240 | 241 | Ok(count) 242 | } 243 | 244 | pub async fn advertise_own_repo(&self) -> Result<()> { 245 | let repo = self 246 | .get_own_repo() 247 | .await 248 | .ok_or_else(|| anyhow!("No own repo found for group"))?; 249 | 250 | let repo_key = repo.id().to_vec(); 251 | 252 | let count = self.dht_repo_count().await? 
+ 1; 253 | 254 | self.routing_context 255 | .set_dht_value( 256 | self.dht_record.key().clone(), 257 | count.try_into()?, 258 | repo_key, 259 | None, 260 | ) 261 | .await?; 262 | 263 | Ok(()) 264 | } 265 | 266 | pub async fn load_repo_from_network( 267 | &mut self, 268 | repo_id: TypedKey, 269 | ) -> Result { 270 | // TODO: Load keypair from DHT 271 | // let protected_store = self.protected_store().unwrap(); 272 | // Load keypair using the repo ID 273 | // let retrieved_keypair = CommonKeypair::load_keypair(&protected_store, &repo_id.value) 274 | // .await 275 | // .map_err(|_| anyhow!("Failed to load keypair for repo_id: {:?}", repo_id))?; 276 | // Some(KeyPair::new( 277 | // owner_key.clone(), 278 | // retrieved_keypair.secret_key.clone().unwrap(), 279 | // )) 280 | let keypair = None; 281 | 282 | let veilid = self.get_veilid_api(); 283 | let mut dht_record: Option = None; 284 | let mut retries = 6; 285 | 286 | while retries > 0 { 287 | retries -= 1; 288 | let dht_record_result = self 289 | .routing_context 290 | .open_dht_record(repo_id.clone(), keypair.clone()) 291 | .await; 292 | 293 | match dht_record_result { 294 | Ok(record) => { 295 | dht_record = Some(record); 296 | break; 297 | } 298 | Err(e) => { 299 | eprintln!( 300 | "Failed to open DHT record: {}. Retries left: {}", 301 | e, retries 302 | ); 303 | if retries == 0 { 304 | return Err(anyhow!( 305 | "Unable to open DHT record, reached max retries: {}", 306 | e 307 | )); 308 | } 309 | } 310 | } 311 | 312 | // Add a delay before retrying (wit exponential backoff) 313 | tokio::time::sleep(std::time::Duration::from_millis(100 * (7 - retries) as u64)).await; 314 | } 315 | 316 | // Ensure that `dht_record` is set before proceeding 317 | let dht_record = dht_record.ok_or_else(|| anyhow!("DHT record retrieval failed"))?; 318 | 319 | let repo = Repo { 320 | dht_record, 321 | encryption_key: self.encryption_key.clone(), 322 | secret_key: None, 323 | routing_context: self.routing_context.clone(), 324 | veilid: veilid.clone(), 325 | iroh_blobs: self.iroh_blobs.clone(), 326 | }; 327 | 328 | self.add_repo(repo.clone()).await?; 329 | 330 | Ok(repo) 331 | } 332 | 333 | async fn load_repo_from_dht(&mut self, subkey: u32) -> Result> { 334 | let repo_id_raw = self 335 | .routing_context 336 | .get_dht_value(self.dht_record.key().clone(), subkey, true) 337 | .await? 
338 | .ok_or_else(|| anyhow!("Unable to load repo ID from DHT"))?; 339 | 340 | let mut repo_id_buffer: [u8; CRYPTO_KEY_LENGTH] = [0; CRYPTO_KEY_LENGTH]; 341 | 342 | // Validate the length before copying 343 | if repo_id_raw.data().len() != repo_id_buffer.len() { 344 | return Err(anyhow!( 345 | "Slice length mismatch: expected {}, got {}", 346 | repo_id_buffer.len(), 347 | repo_id_raw.data().len() 348 | )); 349 | } 350 | 351 | repo_id_buffer.copy_from_slice(repo_id_raw.data()); 352 | 353 | let repo_id = TypedKey::new(CRYPTO_KIND_VLD0, CryptoKey::from(repo_id_buffer)); 354 | 355 | if self.repos.lock().await.contains_key(&repo_id.value) { 356 | return Ok(repo_id); 357 | } 358 | self.load_repo_from_network(repo_id).await?; 359 | 360 | Ok(repo_id) 361 | } 362 | 363 | pub async fn load_repos_from_dht(&mut self) -> Result<()> { 364 | let count = self.dht_repo_count().await?; 365 | 366 | let mut i = 1; 367 | while i <= count { 368 | println!("Loading from DHT {}", i); 369 | if let Err(e) = self.load_repo_from_dht(i.try_into()?).await { 370 | eprintln!("Warning: Failed to load repo {} from DHT: {:?}", i, e); 371 | } 372 | i += 1; 373 | } 374 | 375 | Ok(()) 376 | } 377 | 378 | pub async fn try_load_repo_from_disk(&mut self) -> bool { 379 | if let Err(err) = self.load_repo_from_disk().await { 380 | eprintln!("Unable to load own repo from disk {}", err); 381 | false 382 | } else { 383 | true 384 | } 385 | } 386 | 387 | pub async fn load_repo_from_disk(&mut self) -> Result { 388 | let protected_store = self.veilid.protected_store().unwrap(); 389 | 390 | let mut group_repo_key = self.id().to_string(); 391 | group_repo_key.push_str("-repo"); 392 | 393 | let key_bytes = protected_store 394 | .load_user_secret(group_repo_key) 395 | .map_err(|err| anyhow!("Unable to load repo from disk"))? 
396 | .ok_or_else(|| anyhow!("No repo exists on disk for this group"))?; 397 | 398 | let mut id_bytes: [u8; CRYPTO_KEY_LENGTH] = [0; CRYPTO_KEY_LENGTH]; 399 | id_bytes.copy_from_slice(&key_bytes); 400 | let repo_id = TypedKey::new(CRYPTO_KIND_VLD0, CryptoKey::from(id_bytes)); 401 | 402 | let keypair = self.get_repo_keypair(repo_id).await?; 403 | 404 | let dht_record = self 405 | .routing_context 406 | .open_dht_record( 407 | repo_id.clone(), 408 | Some(KeyPair::new( 409 | keypair.public_key.clone(), 410 | keypair.secret_key.clone().unwrap(), 411 | )), 412 | ) 413 | .await?; 414 | 415 | let secret_key = keypair 416 | .secret_key 417 | .map(|key| TypedKey::new(CRYPTO_KIND_VLD0, key)); 418 | 419 | let repo = Repo::new( 420 | dht_record, 421 | self.encryption_key.clone(), 422 | secret_key, 423 | self.routing_context.clone(), 424 | self.veilid.clone(), 425 | self.iroh_blobs.clone(), 426 | ); 427 | repo.update_route_on_dht().await?; 428 | 429 | self.add_repo(repo.clone()).await?; 430 | 431 | Ok(repo) 432 | } 433 | 434 | pub async fn create_repo(&mut self) -> Result { 435 | if self.get_own_repo().await.is_some() { 436 | return Err(anyhow!("Own repo already created for group")); 437 | } 438 | 439 | // Create a new DHT record for the repo 440 | let schema = DHTSchema::dflt(3)?; 441 | let kind = Some(CRYPTO_KIND_VLD0); 442 | let repo_dht_record = self 443 | .routing_context 444 | .create_dht_record(schema, None, kind) 445 | .await?; 446 | 447 | // Identify the repo with the DHT record's key 448 | let repo_id = repo_dht_record.key().clone(); 449 | 450 | // Use the group's encryption key for the repo 451 | let encryption_key = self.get_encryption_key().clone(); 452 | 453 | // Wrap the secret key in CryptoTyped for storage 454 | let secret_key_typed = 455 | CryptoTyped::new(CRYPTO_KIND_VLD0, self.get_secret_key().unwrap().clone()); 456 | 457 | let repo = Repo::new( 458 | repo_dht_record.clone(), 459 | encryption_key, 460 | Some(secret_key_typed), 461 | self.routing_context.clone(), 462 | self.veilid.clone(), 463 | self.iroh_blobs.clone(), 464 | ); 465 | 466 | // This should happen every time the route ID changes 467 | repo.update_route_on_dht().await?; 468 | 469 | let protected_store = self.veilid.protected_store().unwrap(); 470 | 471 | let keypair = CommonKeypair { 472 | id: repo.id(), 473 | public_key: repo_dht_record.owner().clone(), 474 | secret_key: repo_dht_record.owner_secret().copied(), 475 | encryption_key: encryption_key.clone(), 476 | }; 477 | 478 | keypair 479 | .store_keypair(&protected_store) 480 | .await 481 | .map_err(|e| anyhow!(e))?; 482 | 483 | let mut group_repo_key = self.id().to_string(); 484 | group_repo_key.push_str("-repo"); 485 | let key_bytes = *repo.id(); 486 | protected_store 487 | .save_user_secret(group_repo_key, key_bytes.as_slice()) 488 | .map_err(|e| anyhow!("Unable to store repo id for group: {}", e))?; 489 | 490 | self.add_repo(repo).await?; 491 | 492 | self.advertise_own_repo().await?; 493 | 494 | self.get_own_repo() 495 | .await 496 | .ok_or_else(|| anyhow!("Unexpected error, created repo not persisted")) 497 | } 498 | 499 | async fn get_repo_keypair(&self, repo_id: TypedKey) -> Result { 500 | let protected_store = self.veilid.protected_store()?; 501 | 502 | // Load keypair using the repo ID 503 | CommonKeypair::load_keypair(&protected_store, &repo_id.value) 504 | .await 505 | .map_err(|_| anyhow!("Failed to load keypair for repo_id: {:?}", repo_id)) 506 | } 507 | 508 | pub async fn watch_changes(&self, on_change: F) -> Result<()> 509 | where 510 | F: Fn() -> Fut + 
Send + Sync + 'static, 511 | Fut: Future> + Send + 'static, 512 | { 513 | let repo_count = self.dht_repo_count().await?; 514 | let range = if repo_count > 0 { 515 | ValueSubkeyRangeSet::single_range(0, repo_count as u32 - 1) 516 | } else { 517 | ValueSubkeyRangeSet::full() 518 | }; 519 | 520 | let expiration_duration = 600_000_000; 521 | let expiration = 522 | SystemTime::now().duration_since(UNIX_EPOCH)?.as_micros() as u64 + expiration_duration; 523 | let count = 0; 524 | 525 | // Clone necessary data for the async block 526 | let routing_context = self.routing_context.clone(); 527 | let dht_record_key = self.dht_record.key().clone(); 528 | 529 | // Spawn a task that uses only owned data 530 | tokio::spawn(async move { 531 | match routing_context 532 | .watch_dht_values( 533 | dht_record_key.clone(), 534 | range.clone(), 535 | expiration.into(), 536 | count, 537 | ) 538 | .await 539 | { 540 | Ok(_) => { 541 | println!( 542 | "DHT watch successfully set on record key {:?}", 543 | dht_record_key 544 | ); 545 | 546 | loop { 547 | if let Ok(change) = routing_context 548 | .watch_dht_values( 549 | dht_record_key.clone(), 550 | range.clone(), 551 | expiration.into(), 552 | count, 553 | ) 554 | .await 555 | { 556 | if change > 0.into() { 557 | if let Err(e) = on_change().await { 558 | eprintln!("Failed to re-download files: {:?}", e); 559 | } 560 | } 561 | } 562 | } 563 | } 564 | Err(e) => eprintln!("Failed to set DHT watch: {:?}", e), 565 | } 566 | }); 567 | 568 | Ok(()) 569 | } 570 | } 571 | 572 | impl DHTEntity for Group { 573 | fn get_id(&self) -> CryptoKey { 574 | self.id().clone() 575 | } 576 | 577 | fn get_encryption_key(&self) -> SharedSecret { 578 | self.encryption_key.clone() 579 | } 580 | 581 | fn get_routing_context(&self) -> RoutingContext { 582 | self.routing_context.clone() 583 | } 584 | 585 | fn get_veilid_api(&self) -> VeilidAPI { 586 | self.veilid.clone() 587 | } 588 | 589 | fn get_dht_record(&self) -> DHTRecordDescriptor { 590 | self.dht_record.clone() 591 | } 592 | 593 | fn get_secret_key(&self) -> Option { 594 | self.owner_secret() 595 | } 596 | } 597 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![recursion_limit = "256"] 2 | pub mod backend; 3 | pub mod common; 4 | pub mod constants; 5 | pub mod group; 6 | pub mod repo; 7 | pub mod rpc; 8 | 9 | use crate::constants::{ 10 | FAILED_TO_DESERIALIZE_KEYPAIR, FAILED_TO_LOAD_KEYPAIR, GROUP_NOT_FOUND, KEYPAIR_NOT_FOUND, 11 | ROUTE_ID_DHT_KEY, TEST_GROUP_NAME, UNABLE_TO_GET_GROUP_NAME, UNABLE_TO_SET_GROUP_NAME, 12 | UNABLE_TO_STORE_KEYPAIR, 13 | }; 14 | 15 | use crate::backend::Backend; 16 | use crate::common::{CommonKeypair, DHTEntity}; 17 | 18 | use iroh_blobs::Hash; 19 | use veilid_core::{ 20 | vld0_generate_keypair, CryptoKey, CryptoTyped, TypedKey, VeilidUpdate, CRYPTO_KIND_VLD0, 21 | VALID_CRYPTO_KINDS, 22 | }; 23 | use veilid_iroh_blobs::iroh::VeilidIrohBlobs; 24 | 25 | use serial_test::serial; 26 | 27 | #[cfg(test)] 28 | mod tests { 29 | use super::*; 30 | use anyhow::anyhow; 31 | use anyhow::Result; 32 | use bytes::Bytes; 33 | use common::init_veilid; 34 | use common::make_route; 35 | use rpc::RpcClient; 36 | use rpc::RpcService; 37 | use std::path::Path; 38 | use std::result; 39 | use tmpdir::TmpDir; 40 | use tokio::fs; 41 | use tokio::join; 42 | use tokio::sync::mpsc; 43 | use tokio::time::sleep; 44 | use tokio::time::Duration; 45 | use tokio_stream::wrappers::ReceiverStream; 46 | use 
tokio_stream::StreamExt; 47 | 48 | #[tokio::test] 49 | #[serial] 50 | async fn blob_transfer() -> Result<()> { 51 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 52 | 53 | fs::create_dir_all(path.as_ref()) 54 | .await 55 | .expect("Failed to create base directory"); 56 | 57 | // Initialize the backend 58 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 59 | backend.start().await.expect("Unable to start"); 60 | 61 | // Create a group and a repo 62 | let mut group = backend 63 | .create_group() 64 | .await 65 | .expect("Unable to create group"); 66 | let repo = group.create_repo().await.expect("Unable to create repo"); 67 | 68 | let iroh_blobs = backend 69 | .get_iroh_blobs() 70 | .await 71 | .expect("iroh_blobs not initialized"); 72 | 73 | // Prepare data to upload as a blob 74 | let data_to_upload = b"Test data for blob".to_vec(); 75 | let (tx, rx) = mpsc::channel::>(1); 76 | tx.send(Ok(Bytes::from(data_to_upload.clone()))) 77 | .await 78 | .unwrap(); 79 | drop(tx); // Close the sender 80 | 81 | // upload the data as a blob and get the hash 82 | let hash = iroh_blobs 83 | .upload_from_stream(rx) 84 | .await 85 | .expect("Failed to upload blob"); 86 | 87 | // some delay to ensure blob is uploaded 88 | tokio::time::sleep(Duration::from_millis(100)).await; 89 | 90 | // download the blob 91 | let receiver = iroh_blobs 92 | .read_file(hash.clone()) 93 | .await 94 | .expect("Failed to read blob"); 95 | 96 | // retrieve the data from the receiver 97 | let mut retrieved_data = Vec::new(); 98 | let mut stream = ReceiverStream::new(receiver); 99 | while let Some(chunk_result) = stream.next().await { 100 | match chunk_result { 101 | Ok(bytes) => retrieved_data.extend_from_slice(bytes.as_ref()), 102 | Err(e) => panic!("Error reading data: {:?}", e), 103 | } 104 | } 105 | 106 | // Verify that the downloaded data matches the uploaded data 107 | assert_eq!(retrieved_data, data_to_upload); 108 | 109 | backend.stop().await.expect("Unable to stop"); 110 | Ok(()) 111 | } 112 | 113 | #[tokio::test] 114 | #[serial] 115 | async fn group_creation() -> Result<()> { 116 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 117 | 118 | fs::create_dir_all(path.as_ref()) 119 | .await 120 | .expect("Failed to create base directory"); 121 | 122 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 123 | backend.start().await.expect("Unable to start"); 124 | 125 | let group = backend 126 | .create_group() 127 | .await 128 | .expect("Unable to create group"); 129 | 130 | group 131 | .set_name(TEST_GROUP_NAME) 132 | .await 133 | .expect(UNABLE_TO_SET_GROUP_NAME); 134 | let name = group.get_name().await.expect(UNABLE_TO_GET_GROUP_NAME); 135 | assert_eq!(name, TEST_GROUP_NAME); 136 | 137 | backend.stop().await.expect("Unable to stop"); 138 | Ok(()) 139 | } 140 | 141 | #[tokio::test] 142 | #[serial] 143 | async fn keypair_storage_and_retrieval() -> Result<()> { 144 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 145 | 146 | fs::create_dir_all(path.as_ref()) 147 | .await 148 | .expect("Failed to create base directory"); 149 | 150 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 151 | backend.start().await.expect("Unable to start"); 152 | 153 | let group = backend 154 | .create_group() 155 | .await 156 | .expect("Unable to create group"); 157 | backend.stop().await.expect("Unable to stop"); 158 | 159 | backend.start().await.expect("Unable to restart"); 160 | 161 | let mut loaded_group = 
backend.get_group(&group.id()).await.expect(GROUP_NOT_FOUND); 162 | 163 | let veilid = backend.get_veilid_api().await.unwrap(); 164 | let protected_store = veilid.protected_store().unwrap(); 165 | let keypair_data = protected_store 166 | .load_user_secret(group.id().to_string()) 167 | .expect(FAILED_TO_LOAD_KEYPAIR) 168 | .expect(KEYPAIR_NOT_FOUND); 169 | 170 | let retrieved_keypair: CommonKeypair = 171 | serde_cbor::from_slice(&keypair_data).expect(FAILED_TO_DESERIALIZE_KEYPAIR); 172 | 173 | // Check that the id matches group.id() 174 | assert_eq!(retrieved_keypair.id, group.id()); 175 | 176 | // Check that the public_key matches the owner public key from the DHT record 177 | assert_eq!( 178 | retrieved_keypair.public_key, 179 | loaded_group.get_dht_record().owner().clone() 180 | ); 181 | 182 | // Check that the secret and encryption keys match 183 | assert_eq!(retrieved_keypair.secret_key, group.get_secret_key()); 184 | assert_eq!(retrieved_keypair.encryption_key, group.get_encryption_key()); 185 | 186 | backend.stop().await.expect("Unable to stop"); 187 | Ok(()) 188 | } 189 | 190 | #[tokio::test] 191 | #[serial] 192 | async fn repo_creation() -> Result<()> { 193 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 194 | 195 | fs::create_dir_all(path.as_ref()) 196 | .await 197 | .expect("Failed to create base directory"); 198 | 199 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 200 | backend.start().await.expect("Unable to start"); 201 | 202 | // Step 1: Create a group before creating a repo 203 | let mut group = backend 204 | .create_group() 205 | .await 206 | .expect("Unable to create group"); 207 | 208 | // Step 2: Create a repo 209 | let repo = group.create_repo().await.expect("Unable to create repo"); 210 | 211 | let repo_key = repo.get_id(); 212 | assert!(repo_key != CryptoKey::default(), "Repo ID should be set"); 213 | 214 | // Step 3: Set and verify the repo name 215 | let repo_name = "Test Repo"; 216 | 217 | repo.set_name(repo_name) 218 | .await 219 | .expect("Unable to set repo name"); 220 | 221 | let name = repo.get_name().await.expect(UNABLE_TO_GET_GROUP_NAME); 222 | 223 | assert_eq!(name, repo_name); 224 | 225 | // Step 5: List known repos and verify the repo is in the list 226 | let repos = group.list_repos().await; 227 | assert!(repos.contains(&repo_key)); 228 | 229 | // Step 6: Retrieve the repo by key and check its name 230 | let loaded_repo = group.get_repo(&repo_key).await.expect("Repo not found"); 231 | 232 | let retrieved_name = loaded_repo 233 | .get_name() 234 | .await 235 | .expect("Unable to get repo name after restart"); 236 | assert_eq!(retrieved_name, repo_name); 237 | 238 | backend.stop().await.expect("Unable to stop"); 239 | Ok(()) 240 | } 241 | 242 | #[tokio::test] 243 | #[serial] 244 | async fn sending_message_via_private_route() -> Result<()> { 245 | tokio::time::timeout(Duration::from_secs(888), async { 246 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 247 | 248 | fs::create_dir_all(path.as_ref()) 249 | .await 250 | .expect("Failed to create base directory"); 251 | 252 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 253 | backend.start().await.expect("Unable to start"); 254 | 255 | // Add delay to ensure backend initialization 256 | tokio::time::sleep(Duration::from_secs(2)).await; 257 | 258 | // Create a group and a repo 259 | let mut group = backend 260 | .create_group() 261 | .await 262 | .expect("Unable to create group"); 263 | let repo = 
group.create_repo().await.expect("Unable to create repo"); 264 | let veilid_api = backend 265 | .get_veilid_api() 266 | .await 267 | .expect("Failed to get VeilidAPI instance"); 268 | 269 | // Get the update receiver from the backend 270 | let update_rx = backend 271 | .subscribe_updates() 272 | .await 273 | .expect("Failed to subscribe to updates"); 274 | 275 | // Set up a channel to receive AppMessage updates 276 | let (message_tx, mut message_rx) = mpsc::channel(1); 277 | 278 | // Spawn a task to listen for updates 279 | tokio::spawn(async move { 280 | let mut rx = update_rx.resubscribe(); 281 | while let Ok(update) = rx.recv().await { 282 | if let VeilidUpdate::AppMessage(app_message) = update { 283 | // Optionally, filter by route_id or other criteria 284 | message_tx.send(app_message).await.unwrap(); 285 | } 286 | } 287 | }); 288 | 289 | println!( 290 | "Creating a new custom private route with valid crypto kinds: {:?}", 291 | VALID_CRYPTO_KINDS 292 | ); 293 | 294 | // Create a new private route 295 | let (route_id, route_id_blob) = make_route(&veilid_api) 296 | .await 297 | .expect("Failed to create route after retries"); 298 | 299 | // Store the route_id_blob in DHT 300 | repo.store_route_id_in_dht(route_id_blob.clone()) 301 | .await 302 | .expect("Failed to store route ID blob in DHT"); 303 | 304 | // Define the message to send 305 | let message = b"Test Message to Repo Owner".to_vec(); 306 | 307 | println!("Sending message to owner..."); 308 | 309 | // Send the message 310 | repo.send_message_to_owner(&veilid_api, message.clone(), ROUTE_ID_DHT_KEY) 311 | .await 312 | .expect("Failed to send message to repo owner"); 313 | 314 | // Receive the message from the background task 315 | let received_app_message = message_rx.recv().await.expect("Failed to receive message"); 316 | 317 | // Verify the message 318 | assert_eq!(received_app_message.message(), message.as_slice()); 319 | 320 | backend.stop().await.expect("Unable to stop"); 321 | Ok::<(), anyhow::Error>(()) 322 | }) 323 | .await??; 324 | 325 | Ok(()) 326 | } 327 | 328 | #[tokio::test] 329 | #[serial] 330 | async fn known_group_persistence() -> Result<()> { 331 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 332 | 333 | fs::create_dir_all(path.as_ref()) 334 | .await 335 | .expect("Failed to create base directory"); 336 | 337 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 338 | backend.start().await.expect("Unable to start"); 339 | 340 | let group = backend 341 | .create_group() 342 | .await 343 | .expect("Unable to create group"); 344 | group 345 | .set_name(TEST_GROUP_NAME) 346 | .await 347 | .expect(UNABLE_TO_SET_GROUP_NAME); 348 | 349 | drop(group); 350 | 351 | backend.stop().await.expect("Unable to stop"); 352 | 353 | backend.start().await.expect("Unable to restart"); 354 | 355 | let list = backend.list_groups().await?; 356 | 357 | assert_eq!(list.len(), 1, "Group auto-loaded on start"); 358 | 359 | backend.stop().await.expect("Unable to stop"); 360 | Ok(()) 361 | } 362 | 363 | #[tokio::test] 364 | #[serial] 365 | async fn group_name_persistence() -> Result<()> { 366 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 367 | 368 | fs::create_dir_all(path.as_ref()) 369 | .await 370 | .expect("Failed to create base directory"); 371 | 372 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 373 | backend.start().await.expect("Unable to start"); 374 | 375 | let group = backend 376 | .create_group() 377 | .await 378 | .expect("Unable to 
create group"); 379 | group 380 | .set_name(TEST_GROUP_NAME) 381 | .await 382 | .expect(UNABLE_TO_SET_GROUP_NAME); 383 | 384 | backend.stop().await.expect("Unable to stop"); 385 | 386 | backend.start().await.expect("Unable to restart"); 387 | let loaded_group = backend.get_group(&group.id()).await.expect(GROUP_NOT_FOUND); 388 | 389 | let name = loaded_group 390 | .get_name() 391 | .await 392 | .expect(UNABLE_TO_GET_GROUP_NAME); 393 | assert_eq!(name, TEST_GROUP_NAME); 394 | 395 | backend.stop().await.expect("Unable to stop"); 396 | Ok(()) 397 | } 398 | #[tokio::test] 399 | #[serial] 400 | async fn repo_persistence() -> Result<()> { 401 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 402 | 403 | fs::create_dir_all(path.as_ref()) 404 | .await 405 | .expect("Failed to create base directory"); 406 | 407 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 408 | backend.start().await.expect("Unable to start backend"); 409 | 410 | let mut group = backend 411 | .create_group() 412 | .await 413 | .expect("Failed to create group"); 414 | let group_id = group.id(); 415 | 416 | // Drop the group and stop the backend 417 | drop(group); 418 | backend.stop().await.expect("Unable to stop backend"); 419 | 420 | // Restart backend and verify group and repo persistence 421 | backend.start().await.expect("Unable to restart backend"); 422 | println!( 423 | "Backend restarted, attempting to load group with ID: {:?}", 424 | group_id 425 | ); 426 | 427 | let mut reload_group = backend.get_group(&group_id).await.expect(GROUP_NOT_FOUND); 428 | let loaded_group_id = reload_group.id(); 429 | 430 | // Drop the group and stop the backend 431 | drop(reload_group); 432 | backend.stop().await.expect("Unable to stop backend"); 433 | 434 | // Restart backend and verify group and repo persistence 435 | backend.start().await.expect("Unable to restart backend"); 436 | println!( 437 | "Backend restarted, attempting to load group with ID: {:?}", 438 | loaded_group_id 439 | ); 440 | 441 | let mut loaded_group = backend 442 | .get_group(&loaded_group_id) 443 | .await 444 | .expect(GROUP_NOT_FOUND); 445 | println!("group reloaded with id: {:?}", loaded_group_id); 446 | let repo = loaded_group 447 | .create_repo() 448 | .await 449 | .expect("Unable to create repo"); 450 | 451 | let repo_name = "Test Repo"; 452 | repo.set_name(repo_name) 453 | .await 454 | .expect("Unable to set repo name"); 455 | 456 | let initial_name = repo.get_name().await.expect("Unable to get repo name"); 457 | assert_eq!(initial_name, repo_name, "Initial repo name doesn't match"); 458 | 459 | let repo_id = repo.id(); 460 | println!("lib: Repo created with id: {:?}", repo_id); 461 | 462 | // Check if the repo is listed after restart 463 | let list = loaded_group.list_repos().await; 464 | assert_eq!(list.len(), 1, "One repo got loaded back"); 465 | 466 | let loaded_repo = loaded_group 467 | .get_own_repo() 468 | .await 469 | .expect("Repo not found after restart"); 470 | 471 | println!("a list of repos: {:?}", list); 472 | 473 | let retrieved_name = loaded_repo 474 | .get_name() 475 | .await 476 | .expect("Unable to get repo name after restart"); 477 | assert_eq!( 478 | retrieved_name, repo_name, 479 | "Repo name doesn't persist after restart" 480 | ); 481 | 482 | // Drop the group again and test reloading 483 | drop(loaded_group); 484 | backend 485 | .stop() 486 | .await 487 | .expect("Unable to stop backend after second drop"); 488 | 489 | backend 490 | .start() 491 | .await 492 | .expect("Unable to restart backend 
after second drop"); 493 | 494 | // Verify the group and repos again 495 | let reloaded_group = backend.get_group(&group_id).await.expect(GROUP_NOT_FOUND); 496 | let reloaded_repos = reloaded_group.list_repos().await; 497 | assert_eq!( 498 | reloaded_repos.len(), 499 | 1, 500 | "One repo loaded after second restart" 501 | ); 502 | 503 | let another_list = reloaded_group.list_repos().await; 504 | 505 | println!("Another list of repos: {:?}", another_list); 506 | 507 | let reloaded_repo = reloaded_group 508 | .get_own_repo() 509 | .await 510 | .expect("Repo not found after second restart"); 511 | 512 | let final_name = reloaded_repo 513 | .get_name() 514 | .await 515 | .expect("Unable to get repo name after second restart"); 516 | assert_eq!( 517 | final_name, repo_name, 518 | "Repo name doesn't persist after second restart" 519 | ); 520 | 521 | let known = backend.list_known_group_ids().await?; 522 | assert_eq!(known.len(), 1, "One group got saved"); 523 | 524 | backend 525 | .stop() 526 | .await 527 | .expect("Unable to stop backend after verification"); 528 | 529 | Ok(()) 530 | } 531 | 532 | #[tokio::test] 533 | #[serial] 534 | async fn upload_blob_test() -> Result<()> { 535 | let path = TmpDir::new("test_dweb_backend_upload_blob").await.unwrap(); 536 | 537 | fs::create_dir_all(path.as_ref()) 538 | .await 539 | .expect("Failed to create base directory"); 540 | 541 | // Initialize the backend 542 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 543 | backend.start().await.expect("Unable to start"); 544 | 545 | // Create a group 546 | let mut group = backend 547 | .create_group() 548 | .await 549 | .expect("Unable to create group"); 550 | 551 | // Prepare a temporary file to upload as a blob 552 | let tmp_file_path = path.as_ref().join("test_blob_file.txt"); 553 | let file_content = b"Test content for file upload"; 554 | fs::write(&tmp_file_path, file_content) 555 | .await 556 | .expect("Failed to write to temp file"); 557 | 558 | let repo = group.create_repo().await?; 559 | 560 | // Upload the file as a blob and get the hash 561 | let hash = repo 562 | .upload_blob(tmp_file_path.clone()) 563 | .await 564 | .expect("Failed to upload blob"); 565 | 566 | // Verify that the file was uploaded and the hash was written to the DHT 567 | let dht_value = backend 568 | .get_veilid_api() 569 | .await 570 | .expect("veilid_api not initialized") 571 | .routing_context() 572 | .expect("Failed to get routing context") 573 | .get_dht_value(repo.dht_record.key().clone(), 1, false) 574 | .await 575 | .expect("Failed to retrieve DHT value"); 576 | 577 | if let Some(dht_value_data) = dht_value { 578 | // Use the data() method to extract the byte slice 579 | let dht_value_bytes = dht_value_data.data(); 580 | let dht_value_str = String::from_utf8(dht_value_bytes.to_vec()) 581 | .expect("Failed to convert ValueData to String"); 582 | assert_eq!(dht_value_str, hash.to_hex()); 583 | } else { 584 | panic!("No value found in DHT for the given key"); 585 | } 586 | 587 | // Read back the file using the hash 588 | let iroh_blobs = backend 589 | .get_iroh_blobs() 590 | .await 591 | .expect("iroh_blobs not initialized"); 592 | let receiver = iroh_blobs 593 | .read_file(hash.clone()) 594 | .await 595 | .expect("Failed to read blob"); 596 | 597 | // Retrieve the data from the receiver 598 | let mut retrieved_data = Vec::new(); 599 | let mut stream = ReceiverStream::new(receiver); 600 | while let Some(chunk_result) = stream.next().await { 601 | match chunk_result { 602 | Ok(bytes) => 
retrieved_data.extend_from_slice(bytes.as_ref()), 603 | Err(e) => panic!("Error reading data: {:?}", e), 604 | } 605 | } 606 | 607 | // Verify that the downloaded data matches the original file content 608 | assert_eq!(retrieved_data, file_content); 609 | 610 | backend.stop().await.expect("Unable to stop"); 611 | Ok(()) 612 | } 613 | #[tokio::test] 614 | #[serial] 615 | async fn upload_blob_and_verify_protected_store() -> Result<()> { 616 | let path = TmpDir::new("test_dweb_backend_upload_blob").await.unwrap(); 617 | 618 | fs::create_dir_all(path.as_ref()) 619 | .await 620 | .expect("Failed to create base directory"); 621 | 622 | // Initialize the backend 623 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 624 | backend.start().await.expect("Unable to start"); 625 | 626 | // Create a group 627 | let mut group = backend 628 | .create_group() 629 | .await 630 | .expect("Unable to create group"); 631 | 632 | // Prepare a temporary file to upload as a blob 633 | let tmp_file_path = path.as_ref().join("test_blob_file.txt"); 634 | let file_content = b"Test content for file upload"; 635 | fs::write(&tmp_file_path, file_content) 636 | .await 637 | .expect("Failed to write to temp file"); 638 | 639 | let protected_store = backend 640 | .get_veilid_api() 641 | .await 642 | .unwrap() 643 | .protected_store() 644 | .unwrap(); 645 | 646 | let repo = group.create_repo().await?; 647 | 648 | // Upload the file as a blob and get the hash 649 | let hash = repo 650 | .upload_blob(tmp_file_path.clone()) 651 | .await 652 | .expect("Failed to upload blob"); 653 | 654 | // Verify that the file was uploaded and the hash was written to the DHT 655 | let dht_value = backend 656 | .get_veilid_api() 657 | .await 658 | .expect("veilid_api not initialized") 659 | .routing_context() 660 | .expect("Failed to get routing context") 661 | .get_dht_value(repo.dht_record.key().clone(), 1, false) 662 | .await 663 | .expect("Failed to retrieve DHT value"); 664 | 665 | if let Some(dht_value_data) = dht_value { 666 | // Use the data() method to extract the byte slice 667 | let dht_value_bytes = dht_value_data.data(); 668 | let dht_value_str = String::from_utf8(dht_value_bytes.to_vec()) 669 | .expect("Failed to convert ValueData to String"); 670 | assert_eq!(dht_value_str, hash.to_hex()); 671 | } else { 672 | panic!("No value found in DHT for the given key"); 673 | } 674 | 675 | // Read back the file using the hash 676 | let iroh_blobs = backend 677 | .get_iroh_blobs() 678 | .await 679 | .expect("iroh_blobs not initialized"); 680 | 681 | let receiver = iroh_blobs 682 | .read_file(hash.clone()) 683 | .await 684 | .expect("Failed to read blob"); 685 | 686 | // Retrieve the data from the receiver 687 | let mut retrieved_data = Vec::new(); 688 | let mut stream = ReceiverStream::new(receiver); 689 | while let Some(chunk_result) = stream.next().await { 690 | match chunk_result { 691 | Ok(bytes) => retrieved_data.extend_from_slice(bytes.as_ref()), 692 | Err(e) => panic!("Error reading data: {:?}", e), 693 | } 694 | } 695 | 696 | // Verify that the downloaded data matches the original file content 697 | assert_eq!(retrieved_data, file_content); 698 | 699 | backend.stop().await.expect("Unable to stop"); 700 | Ok(()) 701 | } 702 | 703 | #[tokio::test] 704 | #[serial] 705 | async fn test_join() { 706 | let path = TmpDir::new("test_dweb_backend").await.unwrap(); 707 | 708 | fs::create_dir_all(path.as_ref()) 709 | .await 710 | .expect("Failed to create base directory"); 711 | 712 | let mut backend = 
Backend::new(path.as_ref()).expect("Unable to create Backend"); 713 | 714 | backend.start().await.expect("Unable to start"); 715 | let group = backend 716 | .create_group() 717 | .await 718 | .expect("Unable to create group"); 719 | 720 | group 721 | .set_name(TEST_GROUP_NAME) 722 | .await 723 | .expect(UNABLE_TO_SET_GROUP_NAME); 724 | 725 | let url = group.get_url(); 726 | 727 | let keys = backend::parse_url(url.as_str()).expect("URL was parsed back out"); 728 | 729 | assert_eq!(keys.id, group.id()); 730 | backend.stop().await.expect("Unable to stop"); 731 | } 732 | 733 | #[tokio::test] 734 | #[serial] 735 | async fn list_repos_test() -> Result<()> { 736 | let path = TmpDir::new("test_dweb_backend_list_repos").await.unwrap(); 737 | 738 | fs::create_dir_all(path.as_ref()) 739 | .await 740 | .expect("Failed to create base directory"); 741 | 742 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 743 | backend.start().await.expect("Unable to start"); 744 | 745 | // Create a group and two repos 746 | let mut group = backend 747 | .create_group() 748 | .await 749 | .expect("Unable to create group"); 750 | let repo1 = group.create_repo().await?.clone(); 751 | 752 | // List repos and verify 753 | let repos = group.list_repos().await; 754 | assert!(repos.contains(&repo1.get_id())); 755 | 756 | backend.stop().await.expect("Unable to stop"); 757 | Ok(()) 758 | } 759 | 760 | #[tokio::test] 761 | #[serial] 762 | async fn get_own_repo_test() -> Result<()> { 763 | let path = TmpDir::new("test_dweb_backend_get_own_repo").await.unwrap(); 764 | 765 | fs::create_dir_all(path.as_ref()) 766 | .await 767 | .expect("Failed to create base directory"); 768 | 769 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 770 | backend.start().await.expect("Unable to start"); 771 | 772 | // Create a group and two repos, one writable 773 | let mut group = backend 774 | .create_group() 775 | .await 776 | .expect("Unable to create group"); 777 | let writable_repo = group.create_repo().await?.clone(); 778 | 779 | // Verify own repo is found 780 | let own_repo = group.get_own_repo().await; 781 | assert!(own_repo.is_some()); 782 | assert_eq!(own_repo.unwrap().get_id(), writable_repo.get_id()); 783 | 784 | backend.stop().await.expect("Unable to stop"); 785 | Ok(()) 786 | } 787 | 788 | #[tokio::test] 789 | #[serial] 790 | async fn download_hash_from_peers_test() -> Result<()> { 791 | let base_dir = TmpDir::new("test_dweb_backend_download_hash") 792 | .await 793 | .unwrap(); 794 | 795 | let base_dir_path = base_dir.to_path_buf(); 796 | 797 | let store1 = 798 | iroh_blobs::store::fs::Store::load(base_dir.to_path_buf().join("iroh1")).await?; 799 | let store2 = 800 | iroh_blobs::store::fs::Store::load(base_dir.to_path_buf().join("iroh2")).await?; 801 | 802 | let (v1_result, v2_result) = join!( 803 | init_veilid(&base_dir_path, "downloadpeers1".to_string()), 804 | init_veilid(&base_dir_path, "downloadpeers2".to_string()) 805 | ); 806 | let (veilid_api1, mut update_rx1) = v1_result?; 807 | let (veilid_api2, mut update_rx2) = v2_result?; 808 | 809 | fs::create_dir_all(base_dir.as_ref()) 810 | .await 811 | .expect("Failed to create base directory"); 812 | 813 | let backend1 = Backend::from_dependencies( 814 | &base_dir.to_path_buf(), 815 | veilid_api1.clone(), 816 | update_rx1, 817 | store1, 818 | ) 819 | .await 820 | .unwrap(); 821 | 822 | let backend2 = Backend::from_dependencies( 823 | &base_dir.to_path_buf(), 824 | veilid_api2.clone(), 825 | update_rx2, 826 | store2, 
827 | ) 828 | .await 829 | .unwrap(); 830 | 831 | // Create a group and a peer repo 832 | let mut group = backend1 833 | .create_group() 834 | .await 835 | .expect("Unable to create group"); 836 | 837 | group.set_name("Example").await?; 838 | 839 | let mut peer_repo = group.create_repo().await?; 840 | 841 | sleep(Duration::from_secs(2)).await; 842 | 843 | let group2 = backend2.join_from_url(&group.get_url()).await?; 844 | 845 | // Upload a test blob to the peer repo 846 | let data_to_upload = Bytes::from("Test data for peer download"); 847 | let collection_name = "peer_repo_collection".to_string(); 848 | peer_repo 849 | .iroh_blobs 850 | .create_collection(&collection_name) 851 | .await 852 | .expect("Unable to create collection"); 853 | 854 | // Create a file stream using mpsc 855 | let (tx, rx) = mpsc::channel(1); 856 | tx.send(Ok(data_to_upload.clone())).await.unwrap(); 857 | drop(tx); // Close the sender 858 | 859 | // Upload using the new method `upload_to` 860 | let file_path = "test_file.txt".to_string(); 861 | let file_hash = peer_repo 862 | .iroh_blobs 863 | .upload_to(&collection_name, &file_path, rx) 864 | .await 865 | .expect("Failed to upload to collection"); 866 | 867 | // Add the uploaded file to the collection 868 | let new_file_collection_hash = peer_repo 869 | .iroh_blobs 870 | .set_file(&collection_name, &file_path, &file_hash) 871 | .await 872 | .expect("Unable to add file to collection"); 873 | assert!( 874 | !new_file_collection_hash.as_bytes().is_empty(), 875 | "New collection hash after uploading a file should not be empty" 876 | ); 877 | 878 | sleep(Duration::from_secs(2)).await; 879 | 880 | // Download hash from peers 881 | let mut retries = 10; 882 | while retries > 0 { 883 | if group2.download_hash_from_peers(&file_hash).await.is_ok() { 884 | println!("Download success!"); 885 | break; 886 | } 887 | retries -= 1; 888 | sleep(Duration::from_secs(4)).await; 889 | } 890 | assert!( 891 | retries > 0, 892 | "Failed to download hash from peers after retries" 893 | ); 894 | 895 | backend1.stop().await?; 896 | backend2.stop().await?; 897 | Ok(()) 898 | } 899 | 900 | #[tokio::test] 901 | #[serial] 902 | async fn peers_have_hash_test() -> Result<()> { 903 | let base_dir: TmpDir = TmpDir::new("test_dweb_backend_peers_have_hash") 904 | .await 905 | .unwrap(); 906 | 907 | let base_dir_path = base_dir.to_path_buf(); 908 | 909 | let store1 = 910 | iroh_blobs::store::fs::Store::load(base_dir.to_path_buf().join("iroh1")).await?; 911 | let store2 = 912 | iroh_blobs::store::fs::Store::load(base_dir.to_path_buf().join("iroh2")).await?; 913 | 914 | let (v1_result, v2_result) = join!( 915 | init_veilid(&base_dir_path, "downloadpeers1".to_string()), 916 | init_veilid(&base_dir_path, "downloadpeers2".to_string()) 917 | ); 918 | let (veilid_api1, mut update_rx1) = v1_result?; 919 | let (veilid_api2, mut update_rx2) = v2_result?; 920 | 921 | fs::create_dir_all(base_dir.as_ref()) 922 | .await 923 | .expect("Failed to create base directory"); 924 | 925 | let backend1 = Backend::from_dependencies( 926 | &base_dir.to_path_buf(), 927 | veilid_api1.clone(), 928 | update_rx1, 929 | store1, 930 | ) 931 | .await 932 | .unwrap(); 933 | 934 | let backend2 = Backend::from_dependencies( 935 | &base_dir.to_path_buf(), 936 | veilid_api2.clone(), 937 | update_rx2, 938 | store2, 939 | ) 940 | .await 941 | .unwrap(); 942 | 943 | // Create a group and a peer repo 944 | let mut group1 = backend1 945 | .create_group() 946 | .await 947 | .expect("Unable to create group"); 948 | 949 | let mut peer_repo = 
group1.create_repo().await?; 950 | 951 | // Upload a test blob to the peer repo 952 | let data_to_upload = Bytes::from("Test data for peer check"); 953 | let collection_name = "peer_repo_collection_check".to_string(); 954 | peer_repo 955 | .iroh_blobs 956 | .create_collection(&collection_name) 957 | .await 958 | .expect("Unable to create collection"); 959 | 960 | // Create a file stream using mpsc 961 | let (tx, rx) = mpsc::channel(1); 962 | tx.send(Ok(data_to_upload.clone())).await.unwrap(); 963 | drop(tx); // Close the sender 964 | 965 | let iroh_blobs = backend1 966 | .get_iroh_blobs() 967 | .await 968 | .expect("iroh_blobs not initialized"); 969 | 970 | // Upload using the new method `upload_to` 971 | let file_path = "test_file_check.txt".to_string(); 972 | let file_hash = iroh_blobs 973 | .upload_to(&collection_name, &file_path, rx) 974 | .await 975 | .expect("Failed to upload to collection"); 976 | 977 | // Add the uploaded file to the collection 978 | let new_file_collection_hash = iroh_blobs 979 | .set_file(&collection_name, &file_path, &file_hash) 980 | .await 981 | .expect("Unable to add file to collection"); 982 | assert!( 983 | !new_file_collection_hash.as_bytes().is_empty(), 984 | "New collection hash after uploading a file should not be empty" 985 | ); 986 | 987 | sleep(Duration::from_secs(4)).await; 988 | 989 | let joined_group = backend2 990 | .join_from_url(&group1.get_url()) 991 | .await 992 | .expect("Unable to join group on second peer"); 993 | 994 | assert!( 995 | !new_file_collection_hash.as_bytes().is_empty(), 996 | "New collection hash after uploading a file should not be empty" 997 | ); 998 | 999 | // Retry checking if peers have the hash 1000 | let mut retries = 4; 1001 | let mut peers_have = false; 1002 | while retries > 0 { 1003 | peers_have = joined_group 1004 | .peers_have_hash(&file_hash) 1005 | .await 1006 | .unwrap_or(false); 1007 | if peers_have { 1008 | break; 1009 | } 1010 | retries -= 1; 1011 | sleep(Duration::from_secs(4)).await; 1012 | } 1013 | 1014 | assert!(peers_have, "Peers should have the uploaded hash"); 1015 | 1016 | veilid_api1.shutdown().await; 1017 | veilid_api2.shutdown().await; 1018 | Ok(()) 1019 | } 1020 | 1021 | #[tokio::test] 1022 | #[serial] 1023 | async fn test_create_collection_and_upload_file_via_backend() -> Result<()> { 1024 | // Setup temporary directory for backend and veilid blobs 1025 | let path = TmpDir::new("test_backend_create_collection").await.unwrap(); 1026 | fs::create_dir_all(path.as_ref()) 1027 | .await 1028 | .expect("Failed to create base directory"); 1029 | 1030 | // Initialize the backend 1031 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 1032 | backend.start().await.expect("Unable to start"); 1033 | 1034 | // Step 1: Create a group via backend 1035 | let group = backend 1036 | .create_group() 1037 | .await 1038 | .expect("Unable to create group"); 1039 | 1040 | // Step 2: Create a collection via the backend's veilid_iroh_blobs instance 1041 | let collection_name = "test_collection".to_string(); 1042 | 1043 | let iroh_blobs = backend 1044 | .get_iroh_blobs() 1045 | .await 1046 | .expect("iroh_blobs not initialized"); 1047 | let collection_hash = iroh_blobs 1048 | .create_collection(&collection_name) 1049 | .await 1050 | .expect("Failed to create collection"); 1051 | 1052 | assert!( 1053 | !collection_hash.as_bytes().is_empty(), 1054 | "Collection hash should not be empty" 1055 | ); 1056 | 1057 | // Step 3: Upload a file to the collection 1058 | let file_path = 
path.as_ref().join("test_file.txt"); 1059 | let file_content = b"Test content for collection upload"; 1060 | fs::write(&file_path, file_content) 1061 | .await 1062 | .expect("Failed to write to file"); 1063 | 1064 | let file_hash = iroh_blobs 1065 | .upload_from_path(file_path.clone()) 1066 | .await 1067 | .expect("Failed to upload file"); 1068 | assert!( 1069 | !file_hash.as_bytes().is_empty(), 1070 | "File hash should not be empty" 1071 | ); 1072 | 1073 | // Step 4: Add the file to the collection 1074 | let updated_collection_hash = iroh_blobs 1075 | .set_file(&collection_name, "test_file.txt", &file_hash) 1076 | .await 1077 | .expect("Failed to set file in collection"); 1078 | 1079 | assert!( 1080 | !updated_collection_hash.as_bytes().is_empty(), 1081 | "Updated collection hash should not be empty" 1082 | ); 1083 | 1084 | // Step 5: Verify that the file is listed in the collection 1085 | let file_list = iroh_blobs 1086 | .list_files(&collection_name) 1087 | .await 1088 | .expect("Failed to list files in collection"); 1089 | assert_eq!( 1090 | file_list.len(), 1091 | 1, 1092 | "There should be one file in the collection" 1093 | ); 1094 | assert_eq!(file_list[0], "test_file.txt", "File name should match"); 1095 | 1096 | // Clean up 1097 | backend.stop().await.expect("Unable to stop backend"); 1098 | Ok(()) 1099 | } 1100 | 1101 | #[tokio::test] 1102 | #[serial] 1103 | async fn test_delete_file_from_collection_via_backend() -> Result<()> { 1104 | // Setup temporary directory for backend and veilid blobs 1105 | let path = TmpDir::new("test_backend_delete_file").await.unwrap(); 1106 | fs::create_dir_all(path.as_ref()) 1107 | .await 1108 | .expect("Failed to create base directory"); 1109 | 1110 | // Initialize the backend 1111 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 1112 | backend.start().await.expect("Unable to start"); 1113 | 1114 | // Step 1: Create a group via backend 1115 | let group = backend 1116 | .create_group() 1117 | .await 1118 | .expect("Unable to create group"); 1119 | 1120 | // Step 2: Create a collection via the backend's veilid_iroh_blobs instance 1121 | let collection_name = "test_delete_collection".to_string(); 1122 | 1123 | let iroh_blobs = backend 1124 | .get_iroh_blobs() 1125 | .await 1126 | .expect("iroh_blobs not initialized"); 1127 | 1128 | let collection_hash = iroh_blobs 1129 | .create_collection(&collection_name) 1130 | .await 1131 | .expect("Failed to create collection"); 1132 | 1133 | assert!( 1134 | !collection_hash.as_bytes().is_empty(), 1135 | "Collection hash should not be empty" 1136 | ); 1137 | 1138 | // Step 3: Upload a file to the collection 1139 | let file_path = path.as_ref().join("test_file_to_delete.txt"); 1140 | let file_content = b"File content to be deleted"; 1141 | fs::write(&file_path, file_content) 1142 | .await 1143 | .expect("Failed to write to file"); 1144 | 1145 | let file_hash = iroh_blobs 1146 | .upload_from_path(file_path.clone()) 1147 | .await 1148 | .expect("Failed to upload file"); 1149 | assert!( 1150 | !file_hash.as_bytes().is_empty(), 1151 | "File hash should not be empty" 1152 | ); 1153 | 1154 | // Step 4: Add the file to the collection 1155 | let updated_collection_hash = iroh_blobs 1156 | .set_file(&collection_name, "test_file_to_delete.txt", &file_hash) 1157 | .await 1158 | .expect("Failed to set file in collection"); 1159 | assert!( 1160 | !updated_collection_hash.as_bytes().is_empty(), 1161 | "Updated collection hash should not be empty" 1162 | ); 1163 | 1164 | // Step 5: Delete the 
file from the collection 1165 | let new_collection_hash = iroh_blobs 1166 | .delete_file(&collection_name, "test_file_to_delete.txt") 1167 | .await 1168 | .expect("Failed to delete file from collection"); 1169 | assert!( 1170 | !new_collection_hash.as_bytes().is_empty(), 1171 | "New collection hash after deletion should not be empty" 1172 | ); 1173 | 1174 | // Step 6: Verify that the file was deleted 1175 | let file_list_after_deletion = iroh_blobs 1176 | .list_files(&collection_name) 1177 | .await 1178 | .expect("Failed to list files in collection"); 1179 | assert!( 1180 | file_list_after_deletion.is_empty(), 1181 | "The collection should be empty after deleting the file" 1182 | ); 1183 | 1184 | // Clean up 1185 | backend.stop().await.expect("Unable to stop backend"); 1186 | Ok(()) 1187 | } 1188 | 1189 | #[tokio::test] 1190 | #[serial] 1191 | async fn test_repo_collection_management() -> Result<()> { 1192 | // Setup a temporary directory and initialize the backend 1193 | let path = TmpDir::new("test_repo_collection_management") 1194 | .await 1195 | .unwrap(); 1196 | fs::create_dir_all(path.as_ref()) 1197 | .await 1198 | .expect("Failed to create base directory"); 1199 | 1200 | // Initialize the backend 1201 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 1202 | backend.start().await.expect("Unable to start"); 1203 | 1204 | // Step 1: Create a group 1205 | let mut group = backend 1206 | .create_group() 1207 | .await 1208 | .expect("Failed to create group"); 1209 | 1210 | // Step 2: Create a repo and verify it can write (i.e., has a secret key) 1211 | let mut repo = group.create_repo().await.expect("Failed to create repo"); 1212 | 1213 | assert!(repo.can_write(), "Repo should have write access"); 1214 | 1215 | // Step 3: Set the repo name 1216 | let repo_name = "Test Repo"; 1217 | 1218 | repo.set_name(repo_name) 1219 | .await 1220 | .expect("Unable to set repo name"); 1221 | 1222 | // Step 5: Upload a file, which implicitly creates the collection 1223 | let file_name = "example.txt"; 1224 | let file_content = b"Test content for file upload"; 1225 | 1226 | // Upload the file (this will automatically create or get the collection) 1227 | let file_hash = repo.upload(file_name, file_content.to_vec()).await?; 1228 | assert!( 1229 | !file_hash.as_bytes().is_empty(), 1230 | "File hash should not be empty after upload" 1231 | ); 1232 | 1233 | // Step 6: Use iroh_blobs set_file to update the collection with the uploaded file 1234 | let iroh_blobs = backend 1235 | .get_iroh_blobs() 1236 | .await 1237 | .expect("iroh_blobs not initialized"); 1238 | 1239 | let collection_name = repo.get_name().await.expect("Failed to get repo name"); 1240 | let updated_collection_hash = repo 1241 | .set_file_and_update_dht(&collection_name, file_name, &file_hash) 1242 | .await?; 1243 | assert!( 1244 | !updated_collection_hash.as_bytes().is_empty(), 1245 | "Updated collection hash should not be empty after adding file" 1246 | ); 1247 | 1248 | // Step 7: Verify the file is listed in the collection 1249 | let file_list = repo.list_files().await?; 1250 | assert_eq!( 1251 | file_list.len(), 1252 | 1, 1253 | "There should be one file in the collection" 1254 | ); 1255 | assert_eq!( 1256 | file_list[0], file_name, 1257 | "The listed file should match the uploaded file" 1258 | ); 1259 | 1260 | // Step 8: Retrieve the file hash from the collection and verify it matches the uploaded hash 1261 | let retrieved_file_hash = repo.get_file_hash(file_name).await?; 1262 | assert_eq!( 1263 | file_hash, 
retrieved_file_hash, 1264 | "The retrieved file hash should match the uploaded file hash" 1265 | ); 1266 | 1267 | // Step 9: Delete the file from the collection 1268 | let collection_hash_after_deletion = repo.delete_file(file_name).await?; 1269 | assert!( 1270 | !collection_hash_after_deletion.as_bytes().is_empty(), 1271 | "Collection hash should not be empty after file deletion" 1272 | ); 1273 | 1274 | // Step 10: Verify the file is no longer listed in the collection 1275 | let file_list_after_deletion = repo.list_files().await?; 1276 | assert!( 1277 | file_list_after_deletion.is_empty(), 1278 | "The file list should be empty after deleting the file" 1279 | ); 1280 | 1281 | // Final Step -> Clean up 1282 | backend.stop().await.expect("Unable to stop backend"); 1283 | Ok(()) 1284 | } 1285 | 1286 | #[tokio::test] 1287 | #[serial] 1288 | async fn test_collection_hash_consistency() -> Result<()> { 1289 | // Setup temporary directory and initialize the backend 1290 | let path = TmpDir::new("test_backend_collection_hash_consistency") 1291 | .await 1292 | .unwrap(); 1293 | fs::create_dir_all(path.as_ref()) 1294 | .await 1295 | .expect("Failed to create base directory"); 1296 | 1297 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 1298 | backend.start().await.expect("Unable to start"); 1299 | 1300 | // Step 1: Create a group and a collection 1301 | let group = backend 1302 | .create_group() 1303 | .await 1304 | .expect("Unable to create group"); 1305 | let collection_name = "hash_consistency_collection".to_string(); 1306 | 1307 | let iroh_blobs = backend 1308 | .get_iroh_blobs() 1309 | .await 1310 | .expect("iroh_blobs not initialized"); 1311 | 1312 | // Step 2: Create collection and get initial hash 1313 | let initial_collection_hash = iroh_blobs 1314 | .create_collection(&collection_name) 1315 | .await 1316 | .expect("Failed to create collection"); 1317 | 1318 | // Step 3: Upload a file to the collection 1319 | let file_path = path.as_ref().join("file1.txt"); 1320 | let file_content = b"Content of file 1"; 1321 | fs::write(&file_path, file_content) 1322 | .await 1323 | .expect("Failed to write file 1"); 1324 | 1325 | let file_hash = iroh_blobs 1326 | .upload_from_path(file_path.clone()) 1327 | .await 1328 | .expect("Failed to upload file 1"); 1329 | let updated_collection_hash = iroh_blobs 1330 | .set_file(&collection_name, "file1.txt", &file_hash) 1331 | .await 1332 | .expect("Failed to set file in collection"); 1333 | 1334 | // Verify that the collection hash changed after adding a file 1335 | assert_ne!( 1336 | initial_collection_hash, updated_collection_hash, 1337 | "The collection hash should change after a file is added" 1338 | ); 1339 | 1340 | // Step 4: Remove the file and verify the hash changes again 1341 | let final_collection_hash = iroh_blobs 1342 | .delete_file(&collection_name, "file1.txt") 1343 | .await 1344 | .expect("Failed to delete file from collection"); 1345 | 1346 | assert_ne!( 1347 | updated_collection_hash, final_collection_hash, 1348 | "The collection hash should change after a file is removed" 1349 | ); 1350 | 1351 | // Clean up 1352 | backend.stop().await.expect("Unable to stop backend"); 1353 | Ok(()) 1354 | } 1355 | 1356 | #[tokio::test] 1357 | #[serial] 1358 | async fn test_rpc_service_init() -> Result<()> { 1359 | // Setup temporary directory and initialize the backend 1360 | let path = TmpDir::new("test_rpc_service_init").await.unwrap(); 1361 | fs::create_dir_all(path.as_ref()) 1362 | .await 1363 | .expect("Failed to create 
base directory"); 1364 | 1365 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 1366 | backend.start().await.expect("Unable to start"); 1367 | 1368 | let rpc_instance = RpcService::from_backend(&backend).await?; 1369 | 1370 | backend.stop().await.expect("Unable to stop backend"); 1371 | Ok(()) 1372 | } 1373 | 1374 | #[tokio::test] 1375 | #[serial] 1376 | async fn test_rpc_client() -> Result<()> { 1377 | // Setup temporary directory and initialize the backend 1378 | let path = TmpDir::new("test_rpc_client").await.unwrap(); 1379 | fs::create_dir_all(path.as_ref()) 1380 | .await 1381 | .expect("Failed to create base directory"); 1382 | 1383 | let (veilid2, _) = init_veilid( 1384 | &path.to_path_buf().join("client"), 1385 | "save-dweb-backup".to_string(), 1386 | ) 1387 | .await?; 1388 | 1389 | let mut backend = Backend::new(path.as_ref()).expect("Unable to create Backend"); 1390 | backend.start().await.expect("Unable to start"); 1391 | 1392 | let rpc_instance = RpcService::from_backend(&backend).await?; 1393 | 1394 | let rpc_instance_updater = RpcService::from_backend(&backend).await?; 1395 | 1396 | tokio::spawn(async move { 1397 | rpc_instance_updater.start_update_listener().await.unwrap(); 1398 | }); 1399 | 1400 | rpc_instance.set_name("Example").await?; 1401 | 1402 | let url = rpc_instance.get_descriptor_url(); 1403 | 1404 | tokio::time::sleep(Duration::from_secs(2)).await; 1405 | 1406 | let client = RpcClient::from_veilid(veilid2.clone(), &url).await?; 1407 | 1408 | let name = client.get_name().await?; 1409 | 1410 | assert_eq!(name, "Example", "Unable to get name"); 1411 | 1412 | let list = client.list_groups().await?; 1413 | 1414 | assert_eq!(list.group_ids.len(), 0, "No groups on init"); 1415 | 1416 | backend.stop().await.expect("Unable to stop backend"); 1417 | veilid2.shutdown().await; 1418 | Ok(()) 1419 | } 1420 | } 1421 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #![recursion_limit = "256"] 2 | use crate::backend::Backend; 3 | use crate::common::{init_veilid, CommonKeypair, DHTEntity}; 4 | use crate::constants::{UNABLE_TO_GET_GROUP_NAME, UNABLE_TO_SET_GROUP_NAME}; 5 | use crate::group::Group; 6 | use crate::repo::Repo; 7 | use crate::rpc::{JoinGroupRequest, RemoveGroupRequest}; 8 | use crate::rpc::{RpcClient, RpcService}; 9 | use anyhow::{anyhow, Result}; 10 | use clap::{Arg, ArgAction, Command, Subcommand}; 11 | use std::sync::Arc; 12 | use tokio::fs; 13 | use tokio::sync::Mutex; 14 | use tracing::error; 15 | use xdg::BaseDirectories; 16 | 17 | mod backend; 18 | mod common; 19 | mod constants; 20 | mod group; 21 | mod repo; 22 | mod rpc; 23 | 24 | #[derive(Subcommand)] 25 | enum Commands { 26 | Join { 27 | #[arg(long)] 28 | group_url: String, 29 | }, 30 | Remove { 31 | #[arg(long)] 32 | group_id: String, 33 | }, 34 | List, 35 | Start, 36 | } 37 | 38 | async fn setup_rpc_client( 39 | base_dir: &std::path::Path, 40 | backend_url: &str, 41 | ) -> anyhow::Result { 42 | let (veilid_api, _update_rx) = 43 | init_veilid(base_dir, "save-dweb-backup-client".to_string()).await?; 44 | RpcClient::from_veilid(veilid_api, backend_url).await 45 | } 46 | 47 | #[tokio::main] 48 | async fn main() -> anyhow::Result<()> { 49 | let matches = Command::new("Save DWeb Backend") 50 | .arg( 51 | Arg::new("backend_url") 52 | .long("backend-url") 53 | .help("URL of the backend") 54 | .required(false) 55 | .global(true), 56 | ) 57 | 
.subcommand( 58 | Command::new("join").about("Join a group").arg( 59 | Arg::new("group_url") 60 | .long("group-url") 61 | .help("URL of the group to join") 62 | .required(true), 63 | ), 64 | ) 65 | .subcommand( 66 | Command::new("remove").about("Remove a group").arg( 67 | Arg::new("group_id") 68 | .long("group-id") 69 | .help("ID of the group to remove") 70 | .required(true), 71 | ), 72 | ) 73 | .subcommand(Command::new("list").about("List known groups")) 74 | .subcommand(Command::new("start").about("Start the RPC service and log the URL")) 75 | .get_matches(); 76 | 77 | let backend_url = matches.get_one::("backend_url"); 78 | 79 | let xdg_dirs = BaseDirectories::with_prefix("save-dweb-backend")?; 80 | let base_dir = xdg_dirs.get_data_home(); 81 | 82 | fs::create_dir_all(&base_dir) 83 | .await 84 | .expect("Failed to create base directory"); 85 | 86 | let mut backend = Backend::new(&base_dir)?; 87 | 88 | match matches.subcommand() { 89 | Some(("join", sub_matches)) => { 90 | let backend_url = matches.get_one::("backend_url").ok_or_else(|| { 91 | anyhow!("Error: --backend-url is required for the 'join' command") 92 | })?; 93 | 94 | let group_url = sub_matches.get_one::("group_url").unwrap(); 95 | println!("Joining group: {}", group_url); 96 | 97 | let rpc_client = setup_rpc_client(&base_dir, backend_url).await?; 98 | 99 | rpc_client.join_group(group_url.to_string()).await?; 100 | println!("Successfully joined group."); 101 | } 102 | Some(("list", _)) => { 103 | let backend_url = matches.get_one::("backend_url").ok_or_else(|| { 104 | anyhow!("Error: --backend-url is required for the 'list' command") 105 | })?; 106 | 107 | println!("Listing all groups..."); 108 | 109 | let rpc_client = setup_rpc_client(&base_dir, backend_url).await?; 110 | 111 | let response = rpc_client.list_groups().await?; 112 | for group_id in response.group_ids { 113 | println!("Group ID: {}", group_id); 114 | } 115 | } 116 | Some(("remove", sub_matches)) => { 117 | let backend_url = matches.get_one::("backend_url").ok_or_else(|| { 118 | anyhow!("Error: --backend-url is required for the 'remove' command") 119 | })?; 120 | 121 | let group_id = sub_matches.get_one::("group_id").unwrap(); 122 | println!("Removing group: {}", group_id); 123 | 124 | let rpc_client = setup_rpc_client(&base_dir, backend_url).await?; 125 | 126 | rpc_client.remove_group(group_id.to_string()).await?; 127 | println!("Successfully removed group."); 128 | } 129 | Some(("start", _)) => { 130 | backend.start().await?; 131 | let rpc_service = RpcService::from_backend(&backend).await?; 132 | println!( 133 | "RPC service started at URL: {}", 134 | rpc_service.get_descriptor_url() 135 | ); 136 | rpc_service.start_update_listener().await?; 137 | } 138 | _ => { 139 | // Otherwise, start the normal backend and group operations 140 | backend.start().await?; 141 | tokio::signal::ctrl_c().await?; 142 | backend.stop().await?; 143 | } 144 | } 145 | Ok(()) 146 | } 147 | -------------------------------------------------------------------------------- /src/repo.rs: -------------------------------------------------------------------------------- 1 | use crate::common::DHTEntity; 2 | use anyhow::{anyhow, Result}; 3 | use async_stream::stream; 4 | use bytes::{BufMut, Bytes, BytesMut}; 5 | use core::hash; 6 | use futures_core::stream::Stream; 7 | use hex::decode; 8 | use iroh_blobs::Hash; 9 | use serde::{Deserialize, Serialize}; 10 | use serde_cbor::from_slice; 11 | use std::collections::HashMap; 12 | use std::sync::Arc; 13 | use std::{io::ErrorKind, path::PathBuf}; 14 | 
use tokio::sync::{broadcast, mpsc}; 15 | use tokio_stream::wrappers::ReceiverStream; 16 | use veilid_core::{ 17 | CryptoKey, CryptoSystemVLD0, CryptoTyped, DHTRecordDescriptor, ProtectedStore, RoutingContext, 18 | SharedSecret, Target, VeilidAPI, VeilidUpdate, 19 | }; 20 | use veilid_iroh_blobs::iroh::VeilidIrohBlobs; 21 | 22 | pub const HASH_SUBKEY: u32 = 1; 23 | pub const ROUTE_SUBKEY: u32 = 2; 24 | 25 | #[derive(Clone)] 26 | pub struct Repo { 27 | pub dht_record: DHTRecordDescriptor, 28 | pub encryption_key: SharedSecret, 29 | pub secret_key: Option<CryptoTyped<CryptoKey>>, 30 | pub routing_context: RoutingContext, 31 | pub veilid: VeilidAPI, 32 | pub iroh_blobs: VeilidIrohBlobs, 33 | } 34 | 35 | impl Repo { 36 | pub fn new( 37 | dht_record: DHTRecordDescriptor, 38 | encryption_key: SharedSecret, 39 | secret_key: Option<CryptoTyped<CryptoKey>>, 40 | routing_context: RoutingContext, 41 | veilid: VeilidAPI, 42 | iroh_blobs: VeilidIrohBlobs, 43 | ) -> Self { 44 | Self { 45 | dht_record, 46 | encryption_key, 47 | secret_key, 48 | routing_context, 49 | veilid, 50 | iroh_blobs, 51 | } 52 | } 53 | 54 | pub fn id(&self) -> CryptoKey { 55 | self.dht_record.key().value.clone() 56 | } 57 | 58 | pub fn can_write(&self) -> bool { 59 | self.secret_key.is_some() 60 | } 61 | 62 | pub async fn update_route_on_dht(&self) -> Result<()> { 63 | let route_id_blob = self.iroh_blobs.route_id_blob().await; 64 | 65 | // Set the root hash in the DHT record 66 | self.routing_context 67 | .set_dht_value( 68 | self.dht_record.key().clone(), 69 | ROUTE_SUBKEY, 70 | route_id_blob, 71 | None, 72 | ) 73 | .await 74 | .map_err(|e| anyhow!("Failed to store route ID blob in DHT: {}", e))?; 75 | 76 | Ok(()) 77 | } 78 | 79 | pub async fn has_hash(&self, hash: &Hash) -> Result<bool> { 80 | if self.can_write() { 81 | Ok(self.iroh_blobs.has_hash(hash).await) 82 | } else { 83 | let route_id = self.get_route_id_blob().await?; 84 | self.iroh_blobs.ask_hash(route_id, *hash).await 85 | } 86 | } 87 | 88 | pub async fn get_route_id_blob(&self) -> Result<Vec<u8>> { 89 | if self.can_write() { 90 | return Ok(self.iroh_blobs.route_id_blob().await); 91 | } 92 | 93 | let value = self 94 | .routing_context 95 | .get_dht_value(self.dht_record.key().clone(), ROUTE_SUBKEY, true) 96 | .await? 97 | .ok_or_else(|| anyhow!("Unable to get DHT value for route id blob"))? 98 | .data() 99 | .to_vec(); 100 | 101 | Ok(value) 102 | } 103 | 104 | pub async fn get_file_stream( 105 | &self, 106 | file_name: &str, 107 | ) -> Result<impl Stream<Item = std::io::Result<Bytes>>> { 108 | let hash = self.get_file_hash(file_name).await?; 109 | // download the blob 110 | let receiver = self.iroh_blobs.read_file(hash.clone()).await?; 111 | 112 | let stream = ReceiverStream::new(receiver); 113 | 114 | Ok(stream) 115 | } 116 | 117 | pub async fn update_hash_on_dht(&self, hash: &Hash) -> Result<()> { 118 | // Convert hash to hex for DHT storage 119 | let root_hash_hex = hash.to_hex(); 120 | // Set the root hash in the DHT record 121 | self.routing_context 122 | .set_dht_value( 123 | self.dht_record.key().clone(), 124 | HASH_SUBKEY, 125 | root_hash_hex.clone().into(), 126 | None, 127 | ) 128 | .await 129 | .map_err(|e| anyhow!("Failed to store collection blob in DHT: {}", e))?; 130 | 131 | Ok(()) 132 | } 133 | 134 | pub async fn get_hash_from_dht(&self) -> Result<Hash> { 135 | let value = self 136 | .routing_context 137 | .get_dht_value(self.dht_record.key().clone(), HASH_SUBKEY, true) 138 | .await? 139 | .ok_or_else(|| anyhow!("Unable to get DHT value for repo root hash"))?; 140 | 141 | let data = value.data(); 142 | 143 | // Decode the hex string (64 bytes) into a 32-byte hash 144 | let decoded_hash = decode(data).expect("Failed to decode hex string"); 145 | 146 | // Ensure the decoded hash is 32 bytes 147 | if decoded_hash.len() != 32 { 148 | panic!( 149 | "Expected a 32-byte hash after decoding, but got {} bytes", 150 | decoded_hash.len() 151 | ); 152 | } 153 | let mut hash_raw: [u8; 32] = [0; 32]; 154 | hash_raw.copy_from_slice(&decoded_hash); 155 | 156 | // Now create the Hash object 157 | let hash = Hash::from_bytes(hash_raw); 158 | 159 | Ok(hash) 160 | } 161 | 162 | pub async fn update_collection_on_dht(&self) -> Result<()> { 163 | let collection_hash = self.get_collection_hash().await?; 164 | self.update_hash_on_dht(&collection_hash).await 165 | } 166 | 167 | pub async fn upload_blob(&self, file_path: PathBuf) -> Result<Hash> { 168 | if !self.can_write() { 169 | return Err(anyhow!("Cannot upload blob, repo is not writable")); 170 | } 171 | // Use repo id as key for a collection 172 | // Upload the file and get the hash 173 | let hash = self.iroh_blobs.upload_from_path(file_path).await?; 174 | 175 | self.update_hash_on_dht(&hash).await?; 176 | Ok(hash) 177 | } 178 | 179 | // Method to get or create a collection associated with the repo 180 | async fn get_or_create_collection(&self) -> Result<Hash> { 181 | if !self.can_write() { 182 | // Try to get the collection hash from the DHT (remote or unwritable repos) 183 | if let Ok(collection_hash) = self.get_hash_from_dht().await { 184 | // The collection hash is found, return it directly (no need for a name) 185 | println!("Collection hash found in DHT: {:?}", collection_hash); 186 | return Ok(collection_hash); 187 | } else { 188 | // Error if we're trying to create a collection in a read-only repo 189 | return Err(anyhow::Error::msg( 190 | "Collection not found and cannot create in read-only repo", 191 | )); 192 | } 193 | } 194 | // If the repo is writable, check if the collection exists 195 | let collection_name = self.get_name().await?; 196 | if let Ok(collection_hash) = self.iroh_blobs.collection_hash(&collection_name).await { 197 | // Collection exists, return the hash 198 | println!("Collection hash found in store: {:?}", collection_hash); 199 | Ok(collection_hash) 200 | } else { 201 | // Create a new collection 202 | println!("Creating new collection..."); 203 | let new_hash = match self.iroh_blobs.create_collection(&collection_name).await { 204 | Ok(hash) => { 205 | println!("New collection created with hash: {:?}", hash); 206 | hash 207 | } 208 | Err(e) => { 209 | eprintln!("Failed to create collection: {:?}", e); 210 | return Err(e); 211 | } 212 | }; 213 | 214 | // Update the DHT with the new collection hash 215 | if let Err(e) = self.update_collection_on_dht().await { 216 | eprintln!("Failed to update DHT: {:?}", e); 217 | return Err(e); 218 | } 219 | 220 | // Return the new collection hash 221 | Ok(new_hash) 222 | } 223 | } 224 | 225 | // Method to retrieve a file's hash from the collection 226 | pub async fn get_file_hash(&self, file_name: &str) -> Result<Hash> { 227 | // Ensure the collection exists before reading 228 | let collection_hash = self.get_or_create_collection().await?; 229 | 230 | self.iroh_blobs 231 | .get_file_from_collection_hash(&collection_hash, file_name) 232 | .await 233 | } 234 | 235 | pub async fn list_files(&self) -> Result<Vec<String>> { 236 | if self.can_write() { 237 | let hash = self.get_or_create_collection().await?; 238 | self.list_files_from_collection_hash(&hash).await 239 | } else { 240 | let got_hash = self.get_hash_from_dht().await; 241 | 242 | // Return empty list if we can't fetch from the DHT 243 | if got_hash.is_err() { 244 | Ok(Vec::new()) 245 | } else { 246 | self.list_files_from_collection_hash(&got_hash.unwrap()) 247 | .await 248 | } 249 | } 250 | } 251 | 252 | pub async fn list_files_from_collection_hash( 253 | &self, 254 | collection_hash: &Hash, 255 | ) -> Result<Vec<String>> { 256 | let file_list = self 257 | .iroh_blobs 258 | .list_files_from_hash(collection_hash) 259 | .await?; 260 | 261 | Ok(file_list) 262 | } 263 | 264 | // Method to delete a file from the collection 265 | pub async fn delete_file(&self, file_name: &str) -> Result<Hash> { 266 | self.check_write_permissions()?; 267 | 268 | // Ensure the collection exists before deleting a file 269 | let collection_hash = self.get_or_create_collection().await?; 270 | 271 | // Delete the file from the collection and get the new collection hash 272 | let deleted_hash = self 273 | .iroh_blobs 274 | .delete_file_from_collection_hash(&collection_hash, file_name) 275 | .await?; 276 | 277 | // Persist the new collection hash with the name to the store 278 | let collection_name = self.get_name().await?; 279 | self.iroh_blobs 280 | .persist_collection_with_name(&collection_name, &deleted_hash) 281 | .await?; 282 | 283 | // Update the DHT with the new collection hash 284 | self.update_collection_on_dht().await?; 285 | 286 | Ok(deleted_hash) 287 | } 288 | 289 | // Method to get the collection's hash 290 | async fn get_collection_hash(&self) -> Result<Hash> { 291 | let collection_name = self.get_name().await?; 292 | 293 | self.iroh_blobs.collection_hash(&collection_name).await 294 | } 295 | 296 | pub async fn upload(&self, file_name: &str, data_to_upload: Vec<u8>) -> Result<Hash> { 297 | self.check_write_permissions()?; 298 | 299 | // Ensure the collection exists before uploading 300 | let collection_hash = self.get_or_create_collection().await?; 301 | 302 | // Use the repo name 303 | let collection_name = self.get_name().await?; 304 | let (tx, rx) = mpsc::channel::<std::io::Result<Bytes>>(1); 305 | tx.send(Ok(Bytes::from(data_to_upload.clone()))) 306 | .await 307 | .unwrap(); 308 | drop(tx); 309 | 310 | let file_hash = self 311 | .iroh_blobs 312 | .upload_to(&collection_name, file_name, rx) 313 | .await?; 314 | 315 | // Persist the new collection hash with the name to the store 316 | self.iroh_blobs 317 | .persist_collection_with_name(&collection_name, &file_hash) 318 | .await?; 319 | 320 | // Update the collection hash on the DHT 321 | self.update_collection_on_dht().await?; 322 | 323 | Ok(file_hash) 324 | } 325 | 326 | pub async fn set_file_and_update_dht( 327 | &self, 328 | collection_name: &str, 329 | file_name: &str, 330 | file_hash: &Hash, 331 | ) -> Result<Hash> { 332 | // Step 1: Update the collection with the new file using `set_file` 333 | let updated_collection_hash = self 334 | .iroh_blobs 335 | .set_file(collection_name, file_name, file_hash) 336 | .await?; 337 | println!("Updated collection hash: {:?}", updated_collection_hash); 338 | 339 | // Step 2: Persist the new collection hash locally 340 | self.iroh_blobs 341 | .persist_collection_with_name(collection_name, &updated_collection_hash) 342 | .await?; 343 | println!( 344 | "Collection persisted with new hash: {:?}", 345 | updated_collection_hash 346 | ); 347 | 348 | // Step 3: Update the DHT with the new collection hash 349 | self.update_collection_on_dht().await?; 350 | println!( 351 | "DHT updated with new collection hash: {:?}", 352 | updated_collection_hash 353 | ); 354 | 355 | Ok(updated_collection_hash) 356 | } 357 | 358 | // Helper method to check if the repo can write 359 | fn check_write_permissions(&self) -> Result<()> { 360 | if !self.can_write() { 361 | return Err(anyhow::Error::msg("Repo does not have write permissions")); 362 | } 363 | Ok(()) 364 | } 365 | } 366 | 367 | impl DHTEntity for Repo { 368 | fn get_id(&self) -> CryptoKey { 369 | self.id().clone() 370 | } 371 | 372 | fn get_encryption_key(&self) -> SharedSecret { 373 | self.encryption_key.clone() 374 | } 375 | 376 | fn get_routing_context(&self) -> RoutingContext { 377 | self.routing_context.clone() 378 | } 379 | 380 | fn get_veilid_api(&self) -> VeilidAPI { 381 | self.veilid.clone() 382 | } 383 | 384 | fn get_dht_record(&self) -> DHTRecordDescriptor { 385 | self.dht_record.clone() 386 | } 387 | 388 | fn get_secret_key(&self) -> Option<CryptoKey> { 389 | self.secret_key.clone().map(|key| key.value) 390 | } 391 | } 392 | -------------------------------------------------------------------------------- /src/rpc.rs: -------------------------------------------------------------------------------- 1 | use crate::backend::{crypto_key_from_query, Backend}; 2 | use crate::common::DHTEntity; 3 | use crate::group::Group; 4 | use crate::repo::Repo; 5 | use crate::{ 6 | constants::ROUTE_ID_DHT_KEY, 7 | group::{PROTOCOL_SCHEME, URL_DHT_KEY, URL_ENCRYPTION_KEY}, 8 | }; 9 | 10 | use anyhow::{anyhow, Result}; 11 | use base64::engine::general_purpose::URL_SAFE_NO_PAD; 12 | use base64::Engine as _; 13 | use futures::StreamExt; 14 | use hex::ToHex; 15 | use iroh_blobs::Hash; 16 | use serde::{Deserialize, Serialize}; 17 | use serde_cbor::{from_slice, to_vec}; 18 | use std::convert::TryInto; 19 | use std::sync::Arc; 20 | use std::vec; 21 | use tokio::sync::broadcast::error::RecvError; 22 | use tracing::{error, info}; 23 | use url::Url; 24 | use veilid_core::{ 25 | vld0_generate_keypair, CryptoKey, CryptoSystem, CryptoSystemVLD0, DHTRecordDescriptor, 26 | DHTSchema, KeyPair, RoutingContext, SharedSecret, Target, TypedKey, VeilidAPI, VeilidAppCall, 27 | VeilidUpdate, CRYPTO_KIND_VLD0, 28 | }; 29 | use veilid_iroh_blobs::tunnels::OnNewRouteCallback; 30 | 31 | const MESSAGE_TYPE_JOIN_GROUP: u8 = 0x00; 32 | const MESSAGE_TYPE_LIST_GROUPS: u8 = 0x01; 33 | const MESSAGE_TYPE_REMOVE_GROUP: u8 = 0x02; 34 | const MESSAGE_TYPE_ERROR: u8 = 0xFF; 35 | 36 | const ROUTE_SUBKEY: u32 = 1; 37 | 38 | #[repr(u8)] 39 | #[derive(Serialize, Deserialize)] 40 | enum MessageType { 41 | JoinGroup = MESSAGE_TYPE_JOIN_GROUP, 42 | ListGroups = MESSAGE_TYPE_LIST_GROUPS, 43 | RemoveGroup = MESSAGE_TYPE_REMOVE_GROUP, 44 | } 45 | 46 | #[derive(Serialize, Deserialize)] 47 | pub struct JoinGroupRequest { 48 | pub group_url: String, 49 | } 50 | 51 | #[derive(Serialize, Deserialize)] 52 | pub struct JoinGroupResponse { 53 | status_message: String, 54 | } 55 | 56 | #[derive(Serialize, Deserialize)] 57 | pub struct ListGroupsRequest; 58 | 59 | #[derive(Serialize, Deserialize)] 60 | pub struct ListGroupsResponse { 61 | pub group_ids: Vec<String>, 62 | } 63 | 64 | #[derive(Serialize, Deserialize)] 65 | pub struct RemoveGroupRequest { 66 | pub group_id: String, 67 | } 68 | 69 | #[derive(Serialize, Deserialize)] 70 | pub struct RemoveGroupResponse { 71 | status_message: String, 72 | } 73 | 74 | #[derive(Clone)] 75 | pub struct RpcService { 76 | backend: Backend, 77 | descriptor: RpcServiceDescriptor, 78 | } 79 | 80 | #[derive(Serialize, Deserialize)] 81 | pub struct RpcResponse<T> { 82 | pub success: Option<T>, 83 | pub error: Option<String>, 84 |
} 85 | 86 | // Just used for app calls 87 | pub struct RpcClient { 88 | veilid: VeilidAPI, 89 | routing_context: RoutingContext, 90 | descriptor: RpcServiceDescriptor, 91 | } 92 | 93 | pub fn parse_url_for_rpc(url_string: &str) -> Result { 94 | let url = Url::parse(url_string)?; 95 | 96 | let dht_key = crypto_key_from_query(&url, URL_DHT_KEY) 97 | .map_err(|_| anyhow!("Missing 'dht' key in the URL"))?; 98 | let encryption_key = crypto_key_from_query(&url, URL_ENCRYPTION_KEY) 99 | .map_err(|_| anyhow!("Missing 'enc' key in the URL"))?; 100 | 101 | Ok(RpcKeys { 102 | dht_key, 103 | encryption_key, 104 | }) 105 | } 106 | 107 | impl RpcClient { 108 | pub async fn from_veilid(veilid: VeilidAPI, url: &str) -> Result { 109 | let routing_context = veilid.routing_context()?; 110 | let crypto_system = veilid 111 | .crypto()? 112 | .get(CRYPTO_KIND_VLD0) 113 | .ok_or_else(|| anyhow!("Unable to init crypto system")); 114 | 115 | let descriptor = 116 | RpcServiceDescriptor::from_url(routing_context.clone(), veilid.clone(), url).await?; 117 | 118 | Ok(RpcClient { 119 | veilid, 120 | routing_context, 121 | descriptor, 122 | }) 123 | } 124 | 125 | async fn send_rpc_request Deserialize<'de>>( 126 | &self, 127 | request: &T, 128 | message_type: u8, 129 | ) -> Result { 130 | // Serialize the request 131 | let message = serde_cbor::to_vec(request)?; 132 | 133 | // Get the route ID blob and target 134 | let blob = self.descriptor.get_route_id_blob().await?; 135 | let route_id = self.veilid.import_remote_private_route(blob)?; 136 | let target = Target::PrivateRoute(route_id); 137 | 138 | // Prefix the message type byte 139 | let mut payload = vec![message_type]; 140 | payload.extend_from_slice(&message); 141 | 142 | // Send the app call and wait for the response 143 | let response = self.routing_context.app_call(target, payload).await?; 144 | 145 | // Ensure the response is not empty 146 | if response.is_empty() { 147 | return Err(anyhow!("Empty response received from RPC call")); 148 | } 149 | 150 | // Extract the message type byte and payload 151 | let response_message_type = response[0]; 152 | let payload = &response[1..]; 153 | 154 | // Check the response message type 155 | if response_message_type == MESSAGE_TYPE_ERROR { 156 | // Parse and handle an error response 157 | let rpc_response: RpcResponse<()> = serde_cbor::from_slice(payload)?; 158 | if let Some(err) = rpc_response.error { 159 | return Err(anyhow!("RPC Error: {}", err)); 160 | } else { 161 | return Err(anyhow!("Unknown error format in RPC response")); 162 | } 163 | } 164 | 165 | if response_message_type != message_type { 166 | return Err(anyhow!( 167 | "Unexpected message type in response. 
168 |                 message_type,
169 |                 response_message_type
170 |             ));
171 |         }
172 | 
173 |         // Parse the response payload into RpcResponse
174 |         let rpc_response: RpcResponse<U> = serde_cbor::from_slice(payload)?;
175 | 
176 |         // Handle success responses
177 |         if let Some(data) = rpc_response.success {
178 |             return Ok(data);
179 |         }
180 | 
181 |         // If neither success nor error is present, the response is invalid
182 |         Err(anyhow!(
183 |             "RPC Response is missing both success and error fields"
184 |         ))
185 |     }
186 | 
187 |     pub async fn get_name(&self) -> Result<String> {
188 |         self.descriptor.get_name().await
189 |     }
190 | 
191 |     pub async fn join_group(&self, group_url: String) -> Result<JoinGroupResponse> {
192 |         let request = JoinGroupRequest { group_url };
193 |         self.send_rpc_request(&request, MESSAGE_TYPE_JOIN_GROUP)
194 |             .await
195 |     }
196 | 
197 |     pub async fn list_groups(&self) -> Result<ListGroupsResponse> {
198 |         let request = ListGroupsRequest;
199 |         self.send_rpc_request(&request, MESSAGE_TYPE_LIST_GROUPS)
200 |             .await
201 |     }
202 | 
203 |     pub async fn remove_group(&self, group_id: String) -> Result<RemoveGroupResponse> {
204 |         let request = RemoveGroupRequest { group_id };
205 |         self.send_rpc_request(&request, MESSAGE_TYPE_REMOVE_GROUP)
206 |             .await
207 |     }
208 | }
209 | 
210 | #[derive(Clone)]
211 | pub struct RpcKeys {
212 |     pub dht_key: CryptoKey,
213 |     pub encryption_key: SharedSecret,
214 | }
215 | 
216 | #[derive(Clone)]
217 | pub struct RpcServiceDescriptor {
218 |     keypair: RpcKeys,
219 |     routing_context: RoutingContext,
220 |     veilid: VeilidAPI,
221 |     dht_record: DHTRecordDescriptor,
222 | }
223 | 
224 | impl RpcServiceDescriptor {
225 |     pub async fn from_url(
226 |         routing_context: RoutingContext,
227 |         veilid: VeilidAPI,
228 |         url: &str,
229 |     ) -> Result<Self> {
230 |         let keys = parse_url_for_rpc(url)?;
231 | 
232 |         let record_key = TypedKey::new(CRYPTO_KIND_VLD0, keys.dht_key);
233 | 
234 |         let dht_record = routing_context
235 |             .open_dht_record(record_key.clone(), None)
236 |             .await
237 |             .map_err(|e| anyhow!("Failed to open DHT record: {}", e))?;
238 | 
239 |         let owner_key = dht_record.owner();
240 | 
241 |         Ok(RpcServiceDescriptor {
242 |             keypair: keys,
243 |             routing_context,
244 |             veilid,
245 |             dht_record,
246 |         })
247 |     }
248 | 
249 |     pub fn get_url(&self) -> String {
250 |         let mut url = Url::parse(format!("{0}:?", PROTOCOL_SCHEME).as_str()).unwrap();
251 | 
252 |         url.query_pairs_mut()
253 |             .append_pair(URL_DHT_KEY, self.get_id().encode_hex::<String>().as_str())
254 |             .append_pair(
255 |                 URL_ENCRYPTION_KEY,
256 |                 self.get_encryption_key().encode_hex::<String>().as_str(),
257 |             )
258 |             .append_key_only("rpc");
259 | 
260 |         let url_string = url.to_string();
261 |         info!("Descriptor URL: {}", url_string);
262 |         url_string
263 |     }
264 |     pub async fn get_route_id_blob(&self) -> Result<Vec<u8>> {
265 |         let value = self
266 |             .routing_context
267 |             .get_dht_value(self.dht_record.key().clone(), ROUTE_SUBKEY, true)
268 |             .await?
269 |             .ok_or_else(|| anyhow!("Unable to get DHT value for route id blob"))?
270 |             .data()
271 |             .to_vec();
272 |         Ok(value)
273 |     }
274 | 
275 |     pub async fn update_route_on_dht(&self, route_id_blob: Vec<u8>) -> Result<()> {
276 |         // Store the route ID blob in the DHT record
277 |         self.routing_context
278 |             .set_dht_value(
279 |                 self.dht_record.key().clone(),
280 |                 ROUTE_SUBKEY,
281 |                 route_id_blob,
282 |                 None,
283 |             )
284 |             .await
285 |             .map_err(|e| anyhow!("Failed to store route ID blob in DHT: {}", e))?;
286 | 
287 |         Ok(())
288 |     }
289 | }
290 | 
291 | impl RpcService {
292 |     pub async fn from_backend(backend: &Backend) -> Result<Self> {
293 |         let backend = backend.clone();
294 |         let veilid = backend
295 |             .get_veilid_api()
296 |             .await
297 |             .ok_or_else(|| anyhow!("Backend not started"))?;
298 | 
299 |         let routing_context = veilid.routing_context()?;
300 |         let schema = DHTSchema::dflt(2)?; // Title + Route Id
301 |         let kind = Some(CRYPTO_KIND_VLD0);
302 | 
303 |         // TODO: try loading from protected store before creating
304 |         let dht_record = routing_context
305 |             .create_dht_record(schema, None, kind)
306 |             .await?;
307 |         let crypto = veilid.crypto()?;
308 |         let crypto_system = crypto
309 |             .get(CRYPTO_KIND_VLD0)
310 |             .ok_or_else(|| anyhow!("Unable to init crypto system"))?;
311 | 
312 |         let encryption_key = crypto_system.random_shared_secret();
313 | 
314 |         let keypair = RpcKeys {
315 |             dht_key: dht_record.key().value.clone(),
316 |             encryption_key,
317 |         };
318 | 
319 |         let descriptor = RpcServiceDescriptor {
320 |             keypair,
321 |             routing_context,
322 |             veilid: veilid.clone(),
323 |             dht_record,
324 |         };
325 | 
326 |         let updatable_descriptor = descriptor.clone();
327 | 
328 |         let on_new_route_callback: OnNewRouteCallback = Arc::new(move |route_id, route_id_blob| {
329 |             let updatable_descriptor = updatable_descriptor.clone();
330 | 
331 |             tokio::spawn(async move {
332 |                 if let Err(err) = updatable_descriptor
333 |                     .update_route_on_dht(route_id_blob)
334 |                     .await
335 |                 {
336 |                     eprintln!(
337 |                         "Unable to update route after rebuild for RPC service: {}",
338 |                         err
339 |                     );
340 |                 }
341 |             });
342 |         });
343 | 
344 |         let route_id_blob = backend.get_route_id_blob().await?;
345 |         descriptor.update_route_on_dht(route_id_blob).await?;
346 | 
347 |         Ok(RpcService {
348 |             backend,
349 |             descriptor,
350 |         })
351 |     }
352 | 
353 |     pub fn get_descriptor_url(&self) -> String {
354 |         self.descriptor.get_url()
355 |     }
356 | 
357 |     // Start listening for AppCall events.
358 |     pub async fn start_update_listener(&self) -> Result<()> {
359 |         // Subscribe to updates from the backend
360 |         let mut update_rx = self
361 |             .backend
362 |             .subscribe_updates()
363 |             .await
364 |             .ok_or_else(|| anyhow!("Failed to subscribe to updates"))?;
365 | 
366 |         // Listen for incoming updates and handle AppCall
367 |         loop {
368 |             match update_rx.recv().await {
369 |                 Ok(update) => {
370 |                     if let VeilidUpdate::AppCall(app_call) = update {
371 |                         let app_call_clone = app_call.clone();
372 | 
373 |                         if let Err(e) = self.handle_app_call(*app_call).await {
374 |                             error!("Error processing AppCall: {}", e);
375 | 
376 |                             // Wrap the error in RpcResponse and send it
377 |                             let error_response: RpcResponse<()> = RpcResponse {
378 |                                 success: None,
379 |                                 error: Some(e.to_string()),
380 |                             };
381 |                             if let Err(err) = self
382 |                                 .send_response(
383 |                                     app_call_clone.id().into(),
384 |                                     MESSAGE_TYPE_ERROR,
385 |                                     &error_response,
386 |                                 )
387 |                                 .await
388 |                             {
389 |                                 error!("Failed to send error response: {}", err);
390 |                             }
391 |                         }
392 |                     }
393 |                 }
394 |                 Err(RecvError::Lagged(count)) => {
395 |                     error!("Missed {} updates", count);
396 |                 }
397 |                 Err(RecvError::Closed) => {
398 |                     error!("Update channel closed");
399 |                     break;
400 |                 }
401 |             }
402 |         }
403 | 
404 |         Ok(())
405 |     }
406 | 
407 |     async fn handle_app_call(&self, app_call: VeilidAppCall) -> Result<()> {
408 |         let call_id = app_call.id();
409 |         let message = app_call.message();
410 | 
411 |         if message.is_empty() {
412 |             let error_response: RpcResponse<()> = RpcResponse {
413 |                 success: None,
414 |                 error: Some("Empty message".to_string()),
415 |             };
416 |             self.send_response(call_id.into(), MESSAGE_TYPE_ERROR, &error_response)
417 |                 .await?;
418 |             return Err(anyhow!("Empty message"));
419 |         }
420 | 
421 |         let message_type_byte = message[0];
422 |         let payload = &message[1..];
423 | 
424 |         match message_type_byte {
425 |             MESSAGE_TYPE_JOIN_GROUP => {
426 |                 let request: JoinGroupRequest = serde_cbor::from_slice(payload)?;
427 |                 let response = self.join_group(request).await;
428 |                 self.send_response(call_id.into(), MESSAGE_TYPE_JOIN_GROUP, &response)
429 |                     .await?;
430 |             }
431 |             MESSAGE_TYPE_LIST_GROUPS => {
432 |                 let response = self.list_groups().await;
433 |                 self.send_response(call_id.into(), MESSAGE_TYPE_LIST_GROUPS, &response)
434 |                     .await?;
435 |             }
436 |             MESSAGE_TYPE_REMOVE_GROUP => {
437 |                 let request: RemoveGroupRequest = serde_cbor::from_slice(payload)?;
438 |                 let response = self.remove_group(request).await;
439 |                 self.send_response(call_id.into(), MESSAGE_TYPE_REMOVE_GROUP, &response)
440 |                     .await?;
441 |             }
442 |             _ => {
443 |                 error!("Unknown message type: {}", message_type_byte);
444 |                 let error_response: RpcResponse<()> = RpcResponse {
445 |                     success: None,
446 |                     error: Some("Unknown message type".to_string()),
447 |                 };
448 |                 self.send_response(call_id.into(), MESSAGE_TYPE_ERROR, &error_response)
449 |                     .await?;
450 |             }
451 |         }
452 | 
453 |         Ok(())
454 |     }
455 | 
456 |     async fn send_response<T: Serialize>(
457 |         &self,
458 |         call_id: u64,
459 |         message_type: u8,
460 |         response: &RpcResponse<T>,
461 |     ) -> Result<()> {
462 |         let mut response_buf = vec![message_type];
463 |         let payload = serde_cbor::to_vec(response)?;
464 |         response_buf.extend_from_slice(&payload);
465 | 
466 |         self.backend
467 |             .get_veilid_api()
468 |             .await
469 |             .ok_or_else(|| anyhow!("Veilid API not available"))?
470 |             .app_call_reply(call_id.into(), response_buf)
471 |             .await?;
472 | 
473 |         Ok(())
474 |     }
475 | 
476 |     pub async fn set_name(&self, name: &str) -> Result<()> {
477 |         self.descriptor.set_name(name).await
478 |     }
479 |     pub async fn get_name(&self) -> Result<String> {
480 |         self.descriptor.get_name().await
481 |     }
482 | 
483 |     pub async fn join_group(&self, request: JoinGroupRequest) -> RpcResponse<JoinGroupResponse> {
484 |         let group_url = request.group_url;
485 |         info!("Joining group with URL: {}", group_url);
486 | 
487 |         // Use the backend to join the group from the provided URL
488 |         let backend = self.backend.clone();
489 | 
490 |         match backend.join_from_url(&group_url).await {
491 |             Ok(group) => {
492 |                 let repo_keys: Vec<CryptoKey> = group.list_repos().await;
493 | 
494 |                 for repo_key in repo_keys {
495 |                     if let Ok(repo) = group.get_repo(&repo_key).await {
496 |                         if let Err(err) = replicate_repo(&group, &repo).await {
497 |                             error!("Failed to replicate repository: {:?}", err);
498 |                         }
499 |                     }
500 |                 }
501 | 
502 |                 RpcResponse {
503 |                     success: Some(JoinGroupResponse {
504 |                         status_message: format!(
505 |                             "Successfully joined and replicated group from URL: {}",
506 |                             group_url
507 |                         ),
508 |                     }),
509 |                     error: None,
510 |                 }
511 |             }
512 |             Err(err) => RpcResponse {
513 |                 success: None,
514 |                 error: Some(format!("Failed to join group: {}", err)),
515 |             },
516 |         }
517 |     }
518 | 
519 |     pub async fn list_groups(&self) -> RpcResponse<ListGroupsResponse> {
520 |         let backend = self.backend.clone();
521 | 
522 |         match backend.list_groups().await {
523 |             Ok(groups) => RpcResponse {
524 |                 success: Some(ListGroupsResponse {
525 |                     group_ids: groups.iter().map(|g| g.id().to_string()).collect(),
526 |                 }),
527 |                 error: None,
528 |             },
529 |             Err(err) => RpcResponse {
530 |                 success: None,
531 |                 error: Some(format!("Failed to list groups: {}", err)),
532 |             },
533 |         }
534 |     }
535 | 
536 |     pub async fn remove_group(
537 |         &self,
538 |         request: RemoveGroupRequest,
539 |     ) -> RpcResponse<RemoveGroupResponse> {
540 |         let group_id = request.group_id;
541 |         info!("Removing group with ID: {}", group_id);
542 | 
543 |         let backend = self.backend.clone();
544 | 
545 |         let group_bytes = match URL_SAFE_NO_PAD.decode(&group_id) {
546 |             Ok(bytes) => bytes,
547 |             Err(err) => {
548 |                 return RpcResponse {
549 |                     success: None,
550 |                     error: Some(format!("Failed to decode group ID: {}", err)),
551 |                 };
552 |             }
553 |         };
554 | 
555 |         let group_bytes: [u8; 32] = match group_bytes.try_into() {
556 |             Ok(bytes) => bytes,
557 |             Err(v) => {
558 |                 return RpcResponse {
559 |                     success: None,
560 |                     error: Some(format!("Expected 32 bytes, got {}", v.len())),
561 |                 };
562 |             }
563 |         };
564 | 
565 |         let group_key = CryptoKey::new(group_bytes);
566 | 
567 |         match backend.close_group(group_key).await {
568 |             Ok(_) => RpcResponse {
569 |                 success: Some(RemoveGroupResponse {
570 |                     status_message: format!("Successfully removed group: {}", group_id),
571 |                 }),
572 |                 error: None,
573 |             },
574 |             Err(err) => RpcResponse {
575 |                 success: None,
576 |                 error: Some(format!("Failed to remove group: {}", err)),
577 |             },
578 |         }
579 |     }
580 | 
581 |     pub async fn replicate_known_groups(&self) -> Result<()> {
582 |         info!("Replicating all known groups...");
583 | 
584 |         // Fetch all known group IDs from the backend
585 |         let group_ids = self.backend.list_known_group_ids().await?;
586 | 
587 |         // Iterate over each group and replicate it
588 |         for group_id in group_ids {
589 |             info!("Replicating group with ID: {:?}", group_id);
590 | 
591 |             // Retrieve the group object
592 |             let group = self.backend.get_group(&group_id).await?;
593 | 
594 |             // Fetch and replicate all repositories within the group
595 |             for repo_key in group.list_repos().await {
596 |                 info!("Processing repository with crypto key: {:?}", repo_key);
597 | 
598 |                 let repo = group.get_repo(&repo_key).await?;
599 |                 replicate_repo(&group, &repo).await?;
600 |             }
601 |         }
602 | 
603 |         info!("All known groups replicated successfully.");
604 |         Ok(())
605 |     }
606 | }
607 | 
608 | async fn replicate_repo(group: &Group, repo: &Repo) -> Result<()> {
609 |     if !repo.can_write() {
610 |         let collection_hash = repo.get_hash_from_dht().await?;
611 |         if !group.has_hash(&collection_hash).await? {
612 |             download(group, &collection_hash).await?;
613 |         }
614 |     }
615 | 
616 |     // List the files in the repo
617 |     let files = repo.list_files().await?;
618 | 
619 |     for file_name in files {
620 |         info!("Processing file: {}", file_name);
621 | 
622 |         let file_hash = repo.get_file_hash(&file_name).await?;
623 | 
624 |         // If the repo is not writable and the file hash is not found in the group, attempt to download it.
625 |         if !repo.can_write() && !group.has_hash(&file_hash).await? {
626 |             download(group, &file_hash).await?;
627 |         }
628 |         // Attempt to retrieve the file using download_file_from
629 |         if let Ok(route_id_blob) = repo.get_route_id_blob().await {
630 |             group
631 |                 .iroh_blobs
632 |                 .download_file_from(route_id_blob, &file_hash)
633 |                 .await?;
634 |             info!("Successfully replicated file: {}", file_name);
635 |         } else {
636 |             error!("Failed to get route ID blob for file: {}", file_name);
637 |         }
638 |     }
639 | 
640 |     Ok(())
641 | }
642 | 
643 | async fn download(group: &Group, hash: &Hash) -> Result<()> {
644 |     let repo_keys: Vec<CryptoKey> = group.list_repos().await;
645 | 
646 |     if repo_keys.is_empty() {
647 |         return Err(anyhow!("Cannot download hash. No repos found"));
648 |     }
649 | 
650 |     for repo_key in repo_keys.iter() {
651 |         let repo = group.get_repo(repo_key).await?;
652 |         if let Ok(route_id_blob) = repo.get_route_id_blob().await {
653 |             println!(
654 |                 "Downloading {} from {} via {:?}",
655 |                 hash,
656 |                 repo.id(),
657 |                 route_id_blob
658 |             );
659 |             // Attempt to download the file from the peer
660 |             let result = group
661 |                 .iroh_blobs
662 |                 .download_file_from(route_id_blob, hash)
663 |                 .await;
664 |             if result.is_ok() {
665 |                 return Ok(());
666 |             } else {
667 |                 eprintln!("Unable to download from peer, {}", result.unwrap_err());
668 |             }
669 |         }
670 |     }
671 | 
672 |     Err(anyhow!("Unable to download from any peer"))
673 | }
674 | 
675 | impl DHTEntity for RpcServiceDescriptor {
676 |     async fn set_name(&self, name: &str) -> Result<()> {
677 |         let routing_context = self.get_routing_context();
678 |         let key = self.get_dht_record().key().clone();
679 |         let encrypted_name = self.encrypt_aead(name.as_bytes(), None)?;
680 |         routing_context
681 |             .set_dht_value(key, 0, encrypted_name, None)
682 |             .await?;
683 |         Ok(())
684 |     }
685 | 
686 |     fn get_id(&self) -> CryptoKey {
687 |         self.dht_record.key().value.clone()
688 |     }
689 | 
690 |     fn get_secret_key(&self) -> Option<CryptoKey> {
691 |         None
692 |     }
693 | 
694 |     fn get_encryption_key(&self) -> SharedSecret {
695 |         self.keypair.encryption_key.clone()
696 |     }
697 | 
698 |     fn get_dht_record(&self) -> DHTRecordDescriptor {
699 |         self.dht_record.clone()
700 |     }
701 | 
702 |     fn get_routing_context(&self) -> RoutingContext {
703 |         self.routing_context.clone()
704 |     }
705 | 
706 |     fn get_veilid_api(&self) -> VeilidAPI {
707 |         self.veilid.clone()
708 |     }
709 | }
710 | 
--------------------------------------------------------------------------------
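
Usage sketch (illustrative only, not a file in this repository): the RPC layer above frames every request as a single message-type byte followed by a CBOR-encoded body, and RpcClient hides that framing behind typed methods that return the `success` payload or surface the `error` field as an anyhow error. The snippet below is a minimal sketch of how a client process might drive the service; it assumes an already-attached veilid_core::VeilidAPI plus a service descriptor URL and a group URL obtained out of band (for example from RpcService::get_descriptor_url() on the serving peer), and module paths/imports are elided.

    // Hypothetical client usage; `veilid_api`, `service_url`, and `group_url` are assumptions.
    async fn example_rpc_client(
        veilid_api: VeilidAPI,
        service_url: &str,
        group_url: String,
    ) -> anyhow::Result<()> {
        // Resolve the service's DHT record and route ID blob from its descriptor URL.
        let client = RpcClient::from_veilid(veilid_api, service_url).await?;

        // Each call CBOR-encodes the request, prefixes the message-type byte, and sends it
        // as an app_call over the service's private route; failures come back as anyhow errors.
        client.join_group(group_url).await?;

        // ListGroupsResponse exposes `group_ids` publicly, so the caller can inspect them.
        let groups = client.list_groups().await?;
        for id in &groups.group_ids {
            println!("known group: {}", id);
        }

        Ok(())
    }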