├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENCE ├── README.org ├── ideas.org ├── images └── borgtui-mount-screen.png ├── make_aur_dist.sh ├── rustfmt.toml └── src ├── backends ├── backup_provider.rs ├── borg_provider.rs ├── mod.rs ├── rustic_mount.rs └── rustic_provider.rs ├── borgtui.rs ├── cli.rs ├── gui ├── gui.rs └── mod.rs ├── main.rs ├── mod.rs ├── profiles.rs └── types.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | # the folder where I git pulled the arch aur repo 3 | /archlinux 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "borgtui" 3 | version = "0.5.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | anyhow = "1.0.70" 10 | async-recursion = "1.0.4" 11 | async-trait = "0.1.81" 12 | borgbackup = { version = "0.9.1", features = ["tokio"] } 13 | #borgbackup = { git = "https://github.com/dpbriggs/borgbackup-fork.git", rev = "d5c8ff0", features = ["tokio"]} 14 | chrono = "0.4.24" 15 | clap = { version = "4.2.0", features = ["derive", "env"] } 16 | clap_complete = "4.3.0" 17 | clap_mangen = "0.2.11" 18 | crossterm = "0.26.1" 19 | dirs = "5.0.0" 20 | fuse_mt = "0.6.1" 21 | glob = "0.3.1" 22 | itertools = "0.14.0" 23 | keyring = "2.0.1" 24 | libc = "0.2.169" 25 | notify = { version = "5.1.0", default-features = false, features = ["macos_kqueue"] } 26 | notify-rust = "4.8.0" 27 | open = "5.0.0" 28 | ratatui = "0.25.0" 29 | rustic_backend = "0.5.2" 30 | rustic_core = "0.7.3" 31 | serde = { version = "1.0.159", features = ["derive"] } 32 | serde_json = "1.0.95" 33 | tokio = { version = "1.27.0", features = ["full"] } 34 | tracing = "0.1.37" 35 | tracing-appender = "0.2.2" 36 | tracing-subscriber = "0.3.16" 37 | walkdir = "2.3.3" 38 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 
28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 
91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 
150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 
216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. 
You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. 
Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. 
If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | Borg backup TUI and automation tool. 
635 | Copyright (C) 2023 David Briggs
636 | 
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 | 
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 | 
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 | 
650 | Also add information on how to contact you by electronic and paper mail.
651 | 
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 | 
655 | borgtui Copyright (C) 2023 David Briggs
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 | 
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 | 
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 | 
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>.
675 | 
--------------------------------------------------------------------------------
/README.org:
--------------------------------------------------------------------------------
1 | * BorgTUI
2 | 
3 | A simple TUI and CLI to automate your Borg (and Rustic) backups :^)
4 | 
5 | [[https://user-images.githubusercontent.com/5560032/244952009-ae19036a-8044-4c00-8d42-5305ad6a9860.png][https://user-images.githubusercontent.com/5560032/244952009-ae19036a-8044-4c00-8d42-5305ad6a9860.png]]
6 | 
7 | ** Features
8 | 
9 | - *Simple*: Manage your backup sources, backups, and repositories from either the TUI or CLI.
10 | - *Ergonomic*: Find, add, and canonicalize backup paths [[https://user-images.githubusercontent.com/5560032/244952253-57126b10-d749-4337-9eb4-d6633ee8e0a5.png][from the TUI]].
11 | - *Concurrent*: All operations occur concurrently and in parallel for each repository.
12 | - *Easy*: Just Works™. BorgTUI will back up every source directory to every remote repository.
13 | - *Friendly*: Shell completions, desktop notifications, systemd unit generation.
14 | - *Automation*: Designed to perform regular and scheduled backups.
15 | - *Documentation*: Has man pages and helpful error messages.
16 | 
17 | ** Installation
18 | 
19 | *** Arch Linux
20 | 
21 | BorgTUI is available on the AUR as [[https://aur.archlinux.org/packages/borgtui-git][borgtui-git]].
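If you use an AUR helper, installation is a single command; otherwise the usual clone-and-=makepkg= flow works. The helper shown below (=paru=) is just an example -- any AUR helper can build the package:

#+begin_src bash
# With an AUR helper (paru shown; yay and friends work the same way)
paru -S borgtui-git

# Or the manual AUR route
git clone https://aur.archlinux.org/borgtui-git.git
cd borgtui-git
makepkg -si
#+end_src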
22 | 
23 | *** Manual Installation
24 | 
25 | Please see the [[#manual-installation-1][Manual Installation]] section in the appendix.
26 | 
27 | ** Quick Start Guide
28 | 
29 | *** Initializing or Adding a New Repository
30 | 
31 | BorgTUI normally retrieves the repository encryption passphrase from your keyring. However, when setting up a new repository
32 | BorgTUI reads the passphrase from the environment or from the command-line with =borgtui (init|add-repo) -e <passphrase>=.
33 | The easiest way is to set =BORG_PASSPHRASE=. You can set it with =read=, pasting in the password followed by enter:
34 | 
35 | #+begin_src bash
36 | read -s BORG_PASSPHRASE
37 | #+end_src
38 | 
39 | You'll need to export that variable for BorgTUI to pick it up:
40 | 
41 | #+begin_src bash
42 | export BORG_PASSPHRASE
43 | #+end_src
44 | 
45 | If you want to *add an existing* repository, use =borgtui add-repo=:
46 | 
47 | #+begin_src bash
48 | borgtui add-repo ssh://abcdefg@abc123.repo.borgbase.com/./repo
49 | #+end_src
50 | 
51 | If you want to *create* a new repository, use =borgtui init=:
52 | 
53 | #+begin_src bash
54 | borgtui init ssh://david@home-nas/hdd1/borg
55 | #+end_src
56 | 
57 | *** Adding Sources
58 | 
59 | You can add new backup sources by using the directory chooser in =borgtui=.
60 | Press "a" to open the directory chooser. Autocomplete and canonicalize paths with "TAB".
61 | 
62 | [[https://user-images.githubusercontent.com/5560032/244952253-57126b10-d749-4337-9eb4-d6633ee8e0a5.png][https://user-images.githubusercontent.com/5560032/244952253-57126b10-d749-4337-9eb4-d6633ee8e0a5.png]]
63 | 
64 | You can also use =borgtui add= on the command-line:
65 | 
66 | #+begin_src bash
67 | borgtui add <path>
68 | #+end_src
69 | 
70 | You can manually add paths by editing the profile JSON file under =~/.config/borgtui/profiles/=.
71 | Make sure to run that path through =realpath= first!
72 | 
73 | *** Creating a Backup
74 | 
75 | You can create a new backup across each repository by pressing "u" when =borgtui= is open.
76 | 
77 | [[https://user-images.githubusercontent.com/5560032/244974358-5322a8b0-6e0f-4893-ac3d-0b1eeeecacae.png][https://user-images.githubusercontent.com/5560032/244974358-5322a8b0-6e0f-4893-ac3d-0b1eeeecacae.png]]
78 | 
79 | You can also create a backup on the command-line using:
80 | 
81 | #+begin_src bash
82 | borgtui create
83 | #+end_src
84 | 
85 | *** Listing Archives
86 | 
87 | You can list archives in the TUI by hitting "l". You can also list backups by using:
88 | 
89 | #+begin_src bash
90 | borgtui list
91 | #+end_src
92 | 
93 | *** Verifying Backups (Check)
94 | 
95 | You can verify the integrity of backups by using =$ borgtui check=. This is currently only supported in CLI mode.
96 | 
97 | #+begin_src bash
98 | borgtui check
99 | #+end_src
100 | 
101 | You can create systemd units for verifying the backups by issuing:
102 | 
103 | #+begin_src bash
104 | borgtui systemd-create-unit --install --check-unit
105 | borgtui systemd-create-unit --install --check-unit --timer
106 | systemctl --user daemon-reload
107 | systemctl --user enable --now borgtui-check-default.timer
108 | #+end_src
109 | 
110 | *** Restoring from a Backup
111 | 
112 | BorgTUI supports restoring from backups by mounting an archive or repository and allowing users to interactively restore from that.
113 | 
114 | **** Restoring from the TUI
115 | 
116 | In the UI hit "m" or "M" to select an archive or a repository and then select a mount point. Use "G" to unmount once you've restored from a backup.
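Mounting appears to be FUSE-based (Borg repositories go through borg's own mount support via the =borgbackup= crate's =MountOptions=, and the Rustic backend uses the =fuse_mt= crate listed in =Cargo.toml=), so FUSE userspace support may need to be installed on minimal systems. If a mount point is ever left behind and =borgtui umount= isn't at hand, plain =fusermount= should be able to detach it (the mount point below is just BorgTUI's default suggestion):

#+begin_src bash
# Fallback unmount; use fusermount3 on distributions that ship FUSE 3.
fusermount -u ~/borg-mount
#+end_src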
117 | 
118 | [[./images/borgtui-mount-screen.png][./images/borgtui-mount-screen.png]]
119 | 
120 | BorgTUI will automatically suggest "~/borg-mount" as a mount point and will create the folder when selected.
121 | If you exit the TUI without unmounting you can use the following command to unmount:
122 | 
123 | #+begin_src bash
124 | borgtui umount
125 | #+end_src
126 | 
127 | **** Restoring from the CLI
128 | 
129 | Currently BorgTUI supports mounting an archive or repository. First you need to select an archive to restore from:
130 | 
131 | #+begin_src bash
132 | borgtui list
133 | 2023-06-11T22:15:31.551471Z INFO borgtui: /hdd3/NewBackup::real-2023-04-23:14:01:00
134 | 2023-06-11T22:15:31.551481Z INFO borgtui: /hdd3/NewBackup::real-2023-04-23:23:27:23
135 | ... truncated ...
136 | #+end_src
137 | 
138 | The archive =/hdd3/NewBackup::real-2023-04-23:14:01:00= looks good. The repository =/hdd3/NewBackup= is also a good choice.
139 | Let's mount the archive at =~/borg-mount=:
140 | 
141 | #+begin_src bash
142 | mkdir ~/borg-mount
143 | borgtui mount /hdd3/NewBackup::real-2023-04-23:14:01:00 ~/borg-mount
144 | #+end_src
145 | 
146 | You can also mount whole repositories:
147 | 
148 | #+begin_src bash
149 | borgtui mount /hdd3/NewBackup ~/borg-mount
150 | #+end_src
151 | 
152 | Your backup should show up in that folder. Ideally, use a file manager to browse and selectively restore whatever files you need.
153 | You can unmount a mounted archive or repository by using =borgtui umount=:
154 | 
155 | #+begin_src bash
156 | borgtui umount ~/borg-mount
157 | #+end_src
158 | 
159 | *** Profiles
160 | 
161 | BorgTUI supports having several profiles which each contain their own backup sources and borg repositories.
162 | The default profile is called "default" and lives under =~/.config/borgtui/profiles/default.json= on Linux.
163 | You can set =BORGTUI_PROFILE= in your environment or use =borgtui -p ...= to select the profile.
164 | 
165 | The default screen in BorgTUI is the profile view where you can see your backup paths and repositories:
166 | 
167 | [[https://user-images.githubusercontent.com/5560032/244976922-1fbc3393-a4ba-44be-8b2c-31b3cc02b831.png][https://user-images.githubusercontent.com/5560032/244976922-1fbc3393-a4ba-44be-8b2c-31b3cc02b831.png]]
168 | 
169 | **** Creating New Profiles
170 | 
171 | Interacting with BorgTUI will automatically create a profile called "default" unless a different profile is specified.
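For example, to run a backup against a non-default profile (the profile name =work= below is purely illustrative):

#+begin_src bash
borgtui -p work create
# or equivalently, via the environment
BORGTUI_PROFILE=work borgtui create
#+end_src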
172 | You can create a new profile with: 173 | 174 | #+begin_src bash 175 | borgtui add-profile 176 | #+end_src 177 | 178 | *** Pruning and Compacting 179 | 180 | You can prune by pressing "\" in the TUI or by issuing: 181 | 182 | #+begin_src bash 183 | borgtui prune 184 | #+end_src 185 | 186 | You can compact a repo by pressing "c" in the TUI or by issuing: 187 | 188 | #+begin_src bash 189 | borgtui compact 190 | #+end_src 191 | 192 | *** Disabling a Repository 193 | 194 | To disable a repository so that BorgTUI won't interact with it set the disabled flag in the repository section of the configuration: 195 | 196 | #+begin_src json 197 | "repos": [ 198 | { 199 | "path": "/path/to/borg/repo", 200 | "encryption": "Keyring", 201 | "disabled": true 202 | }, 203 | ] 204 | #+end_src 205 | 206 | To find your profile use the "config-path" subcommand: 207 | 208 | #+begin_src bash 209 | borgtui config-path 210 | #+end_src 211 | 212 | This is useful to prevent unnecessary errors and logs when you're upgrading your NAS or BorgBase decides to have an extended outage :^) 213 | 214 | ** Automatic Scheduled Backups 215 | 216 | BorgTUI is designed to regularly back up your files. 217 | 218 | *** Systemd User Units 219 | 220 | BorgTUI contains systemd user unit templates you can use to automate backups. You can install the service and timer with (replace =-default= with your custom profile name if you have one): 221 | 222 | #+begin_src bash 223 | borgtui systemd-create-unit --install 224 | borgtui systemd-create-unit --install --timer 225 | systemctl --user daemon-reload 226 | systemctl --user enable --now borgtui-create-default.timer 227 | #+end_src 228 | 229 | By default the backup occurs every night at 9PM local time. Edit the timer unit in =~/.config/systemd/user/= to modify the schedule. 230 | BorgTUI will issue a notification that the backup completed. 231 | 232 | You can trigger backups manually with: 233 | 234 | #+begin_src bash 235 | systemctl --user start borgtui-create-default 236 | #+end_src 237 | 238 | You can view logs of past backups with: 239 | 240 | #+begin_src bash 241 | journalctl --user -u borgtui-create-default 242 | #+end_src 243 | 244 | *** Without Systemd 245 | 246 | Simply issue =borgtui create= with the scheduling system of your choosing (cron, etc). 247 | 248 | ** Upgrading BorgTUI Versions 249 | *** Upgrading to 0.5.0 - Repository Format Change 250 | 251 | In =0.5.0= the repository format has changed to better isolate repository options. It went from: 252 | 253 | #+begin_src json 254 | "repos": [ 255 | { 256 | "path": "/home/david/borg-test-repo0", 257 | "rsh": "foobar", 258 | "encryption": "None", 259 | "disabled": false, 260 | "kind": "Borg" 261 | }, 262 | ] 263 | #+end_src 264 | 265 | To repository options living under a config enum: 266 | 267 | #+begin_src json 268 | "repos": [ 269 | { 270 | "path": "/home/david/borg-test-repo0", 271 | "encryption": "None", 272 | "disabled": false, 273 | "config": { 274 | "BorgV1": { 275 | "rsh": "foobar" 276 | } 277 | } 278 | }, 279 | ] 280 | #+end_src 281 | 282 | BorgTUI will transparently load the "V1" config and transform it into the latest config file format on save. 283 | To update to the latest config format, use =$ borgtui update-config= or open BorgTUI and hit "s" to "save profile". 284 | 285 | ** Appendix 286 | 287 | *** **Experimental** Rustic Backend Support 288 | 289 | BorgTUI supports [[https://rustic.cli.rs/][Rustic]] as a native backup backend. 
The configuration is nearly identical except for a repository =kind= in configs:
290 | 
291 | #+begin_src json
292 | ...
293 | "repos": [
294 |     {
295 |         "kind": "Rustic",
296 |         "path": "/home/david/restic-test-repo",
297 |         ...
298 |     },
299 | ]
300 | ...
301 | #+end_src
302 | 
303 | To initialize a rustic repo follow the usual init process but with =--kind rustic= passed in.
304 | Same idea for adding repos -- just do =borgtui add-repo --kind rustic <..>=.
305 | 
306 | **WARNING**: Rustic support is not production grade yet. It's intended to provide N+1 redundancy in terms of backup providers. Please use it in conjunction with other backup providers (borg, whatever you use, etc).
307 | 
308 | *** Manual Installation
309 | 
310 | You can manually install BorgTUI with [[https://doc.rust-lang.org/cargo/getting-started/installation.html][cargo]]:
311 | 
312 | #+begin_src bash
313 | cargo install --git https://github.com/dpbriggs/borgtui.git
314 | #+end_src
315 | 
316 | **** Shell Completion
317 | 
318 | Shell completions can be enabled by sourcing completions generated by BorgTUI. Replace "zsh" with whatever shell you're using (e.g. "bash"):
319 | 
320 | #+begin_src bash
321 | source <(borgtui shell-completion --shell zsh)
322 | #+end_src
323 | 
324 | **** Install Man Pages
325 | 
326 | Install the man pages to a location of your choosing with:
327 | 
328 | #+begin_src bash
329 | borgtui install-man-pages
330 | #+end_src
331 | 
332 | On most systems you can use =manpath= to find where to install those man pages.
333 | 
334 | *** Why does this exist?
335 | 
336 | I wanted a tool to automatically *back up the same set of folders to every repository*.
337 | I couldn't get Vorta to ergonomically back up the same set of folders to several remote repositories ([[https://github.com/borgbase/vorta/issues/942][issue]]).
338 | 
339 | ** Known issues
340 | 
341 | *** Password-based SSH doesn't work and messes up the terminal
342 | 
343 | I can't find a way to ask SSH to not ask for a password without modifying the actual SSH command used or editing the ssh_config.
344 | I can't do the latter, so a refactor would need to occur somewhere between BorgTUI and borg itself.
345 | 
346 | *** If you attempt to init or add a repository with a faulty (or not-running) keyring, the profile saves but the password doesn't
347 | 
348 | A workaround is to start whatever keyring you use (search "wallet", open chromium, etc), remove the repo from the config-path, and then use =add-repo= to re-add it.
349 | 
350 | ** Choice Excerpt from the Licence
351 | 
352 | Please carefully read the [[file:LICENCE][LICENCE]] file before using this program.
353 | 
354 | #+begin_quote
355 | 15. Disclaimer of Warranty.
356 | 
357 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
358 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
359 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
360 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
361 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
362 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
363 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
364 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
365 | 
366 | 16. Limitation of Liability.
367 | 368 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 369 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 370 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 371 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 372 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 373 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 374 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 375 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 376 | SUCH DAMAGES. 377 | #+end_quote 378 | 379 | ** Note 380 | 381 | This work is not affiliated with my employer in any way. 382 | 383 | ** Attribution 384 | 385 | *** Rustic 386 | 387 | While developing the rustic backend I reviewed the source code of https://github.com/rustic-rs/rustic/tree/main 388 | and in particular, the mounting/unmounting system. 389 | -------------------------------------------------------------------------------- /ideas.org: -------------------------------------------------------------------------------- 1 | * Ideas 2 | ** DONE "borgtui add" will add a folder path to the default profile 3 | CLOSED: [2023-04-01 Sat 15:23] 4 | ** DONE "borgtui create" will generate a backup for each repository under a profile. 5 | CLOSED: [2023-04-02 Sun 10:47] 6 | ** DONE Use the async borgtui create 7 | CLOSED: [2023-04-09 Sun 09:43] 8 | ** DONE Have a different screen than backing up files 9 | CLOSED: [2023-04-16 Sun 16:27] 10 | ** DONE Have a chart of the backup speed for each repo 11 | CLOSED: [2023-04-16 Sun 16:28] 12 | ** DONE Have both the TUI and non-interactive mode use the same command channel 13 | CLOSED: [2023-04-16 Sun 16:28] 14 | ** DONE Have a basic profile view 15 | CLOSED: [2023-04-16 Sun 16:28] 16 | ** DONE Have the ability to add backup paths from the tui 17 | CLOSED: [2023-04-21 Fri 22:21] 18 | ** DONE Have a severity for info_logs 19 | CLOSED: [2023-04-22 Sat 10:06] 20 | Not quite the spirit but we have error logs now. 21 | ** DONE Add compaction 22 | CLOSED: [2023-04-22 Sat 10:18] 23 | ** DONE Make list archives a table 24 | CLOSED: [2023-04-22 Sat 20:08] 25 | ** DONE Paths with spaces in them are broken! 26 | CLOSED: [2023-04-22 Sat 21:14] 27 | Thanks Matti for the quote idea! 28 | ** DONE Add exclude patterns 29 | CLOSED: [2023-04-24 Mon 20:13] 30 | ** DONE Add mounting support 31 | CLOSED: [2023-04-30 Sun 15:56] 32 | ** DONE Add a special wrapper type for the passphrase 33 | CLOSED: [2023-04-30 Sun 16:04] 34 | ** DONE Automatically canonicalize paths in the backup directory chooser 35 | CLOSED: [2023-04-30 Sun 16:11] 36 | ** DONE Watch the profile config file and update it when it's edited externally 37 | CLOSED: [2023-05-05 Fri 10:00] 38 | ** DONE Refactor the input popup modal to be generic 39 | CLOSED: [2023-05-12 Fri 20:47] 40 | ** DONE Have a command to create a systemd unit 41 | CLOSED: [2023-05-21 Sun 13:46] 42 | ** DONE Add a systemd timer unit 43 | CLOSED: [2023-06-11 Sun 12:10] 44 | ** DONE Have the ability to disable repos 45 | CLOSED: [2023-08-22 Tue 20:46] 46 | ** DONE Add a subcommand to print out the configuration dir used 47 | CLOSED: [2023-08-22 Tue 20:59] 48 | ** DONE Don't immediately bail when an error occurs in CLI mode for create (run them in parallel?) 
49 | CLOSED: [2023-08-22 Tue 21:22] 50 | ** DONE Have a cli command to create a new profile 51 | CLOSED: [2023-08-23 Wed 18:42] 52 | ** DONE Have the ability to mount from the UI 53 | CLOSED: [2023-08-26 Sat 15:32] 54 | ** DONE Add the ability to unmount in the UI 55 | CLOSED: [2023-08-26 Sat 20:56] 56 | ** DONE Add the ability to list repositories from the CLI 57 | CLOSED: [2023-09-03 Sun 10:50] 58 | ** DONE Add the ability to set the borg passphrase from the CLI (=set-passphrase --keyfile= | --borg_passphrase==) 59 | CLOSED: [2023-09-24 Sun 13:49] 60 | ** DONE Just print the repository name when using list-repos 61 | CLOSED: [2023-10-29 Sun 11:58] 62 | ** DONE Have the ability to list archives per-repo 63 | CLOSED: [2023-10-29 Sun 13:03] 64 | ** DONE Use Passphrase instead of String everywhere 65 | CLOSED: [2023-11-03 Fri 13:07] 66 | ** DONE Add borg check support 67 | CLOSED: [2023-11-03 Fri 13:57] 68 | ** DONE Add systemd unit options for check 69 | CLOSED: [2023-11-03 Fri 14:10] 70 | ** DONE Have a --latest flag for listing archives 71 | CLOSED: [2023-11-04 Sat 10:42] 72 | ** DONE Using =init= on a new profile should make that profile 73 | CLOSED: [2024-02-10 Sat 19:46] 74 | ** TODO Have a cursor in the profiles screen so you can interact and edit them 75 | ** TODO Update the README to document the project 76 | ** TODO Add the ability to move the cursor when adding projects by CLI 77 | ** TODO Add a cursor in "InputFieldWithSuggestions" to help track which one is "selected" 78 | ** TODO Have a subcommand to make invalid states in setting the repo password unrepresentable 79 | ** TODO Check if a file exists when using set-password with a keyfile 80 | ** TODO Fix how notifications are handled in check (there's several in different places) 81 | ** TODO Make a generic notifications function 82 | ** TODO Suggest a canonicalized path in the "add file path" popup 83 | ** TODO BUG: Mount points aren't sorted in most recent order 84 | -------------------------------------------------------------------------------- /images/borgtui-mount-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dpbriggs/borgtui/fdcf13ba063a9e8516e5bc5f679af5504304819d/images/borgtui-mount-screen.png -------------------------------------------------------------------------------- /make_aur_dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This is a hacky script to assist in automating borgtui AUR package updates. 4 | 5 | set -e 6 | 7 | cd /home/david/programming/borgtui/archlinux/borgtui-git 8 | git pull origin 9 | makepkg -cf 10 | makepkg --printsrcinfo > .SRCINFO 11 | git add . 
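# Commit and push the regenerated .SRCINFO (and any other changes) to the AUR repository.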
12 | git commit -m "Automated regular package update" 13 | git push origin 14 | cd - 15 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" -------------------------------------------------------------------------------- /src/backends/backup_provider.rs: -------------------------------------------------------------------------------- 1 | /// Backup providers for BorgTUI 2 | use std::{path::PathBuf, sync::Arc}; 3 | 4 | use async_trait::async_trait; 5 | use tokio::sync::Semaphore; 6 | 7 | use crate::profiles::{Passphrase, PruneOptions, Repository, RepositoryOptions}; 8 | use crate::types::{BorgResult, CommandResponseSender, RepositoryArchives}; 9 | 10 | #[async_trait] 11 | pub(crate) trait BackupProvider: Send { 12 | #[allow(clippy::too_many_arguments)] 13 | async fn create_backup( 14 | &self, 15 | archive_name: String, 16 | backup_paths: &[PathBuf], 17 | exclude_patterns: &[String], 18 | exclude_caches: bool, 19 | repo: Repository, 20 | progress_channel: CommandResponseSender, 21 | completion_semaphore: Arc, 22 | ) -> BorgResult<()>; 23 | async fn list_archives(&self, repo: &Repository) -> BorgResult; 24 | async fn init_repo( 25 | &self, 26 | repo_loc: String, 27 | passphrase: Option, 28 | config: RepositoryOptions, 29 | ) -> BorgResult<()>; 30 | async fn mount( 31 | &self, 32 | repo: &Repository, 33 | given_repository_path: String, 34 | mountpoint: PathBuf, 35 | ) -> BorgResult<()>; 36 | // TODO: Figure out unmounting 37 | #[allow(unused)] 38 | async fn unmount(&self, mountpoint: PathBuf) -> BorgResult<()>; 39 | async fn prune( 40 | &self, 41 | repo: &Repository, 42 | prune_options: PruneOptions, 43 | progress_channel: CommandResponseSender, 44 | ) -> BorgResult<()>; 45 | async fn compact( 46 | &self, 47 | repo: &Repository, 48 | progress_channel: CommandResponseSender, 49 | ) -> BorgResult<()>; 50 | async fn check( 51 | &self, 52 | repo: &Repository, 53 | progress_channel: CommandResponseSender, 54 | ) -> BorgResult; 55 | async fn repair( 56 | &self, 57 | repo: &Repository, 58 | progress_channel: CommandResponseSender, 59 | ) -> BorgResult; 60 | } 61 | -------------------------------------------------------------------------------- /src/backends/borg_provider.rs: -------------------------------------------------------------------------------- 1 | use std::{path::PathBuf, process::Stdio}; 2 | 3 | use anyhow::anyhow; 4 | use async_trait::async_trait; 5 | use borgbackup::{ 6 | asynchronous as borg_async, 7 | common::{ 8 | CommonOptions, EncryptionMode, InitOptions, MountOptions, MountSource, 9 | PruneOptions as BorgLibPruneOptions, 10 | }, 11 | output::list::ListRepository as BorgLibListRepository, 12 | }; 13 | use tracing::info; 14 | 15 | use crate::{ 16 | borgtui::CommandResponse, 17 | profiles::{Passphrase, Repository, RepositoryOptions}, 18 | types::{ 19 | send_check_complete, send_check_progress, send_error, send_info, take_repo_lock, Archive, 20 | BackupCreateProgress, BackupCreationProgress, BorgResult, CommandResponseSender, 21 | RepositoryArchives, 22 | }, 23 | }; 24 | 25 | impl From for RepositoryArchives { 26 | fn from(value: BorgLibListRepository) -> Self { 27 | RepositoryArchives { 28 | path: value.repository.location, 29 | archives: value 30 | .archives 31 | .into_iter() 32 | .map(|archive| Archive { 33 | name: archive.name, 34 | creation_date: archive.start, 35 | }) 36 | .collect(), 37 | } 38 | } 39 | } 40 | 41 | impl From for 
BackupCreationProgress { 42 | fn from(value: borg_async::CreateProgress) -> Self { 43 | match value { 44 | borg_async::CreateProgress::Progress { 45 | original_size, 46 | compressed_size, 47 | deduplicated_size, 48 | nfiles, 49 | path, 50 | } => BackupCreationProgress::InProgress { 51 | original_size, 52 | compressed_size, 53 | deduplicated_size, 54 | num_files: nfiles, 55 | current_path: path, 56 | }, 57 | borg_async::CreateProgress::Finished => BackupCreationProgress::Finished, 58 | } 59 | } 60 | } 61 | 62 | fn make_common_options(repo: &Repository) -> BorgResult { 63 | let borg_options = repo.borg_options()?; 64 | Ok(CommonOptions { 65 | rsh: borg_options.rsh.clone(), 66 | remote_path: borg_options.remote_path.clone(), 67 | ..Default::default() 68 | }) 69 | } 70 | 71 | /// TODO: tie this into the repo which was mounted! 72 | pub(crate) async fn hack_unmount(mountpoint: PathBuf) -> BorgResult<()> { 73 | rustic_provider::RusticProvider {} 74 | .unmount(mountpoint) 75 | .await?; 76 | Ok(()) 77 | } 78 | 79 | async fn borg_check( 80 | repo: &Repository, 81 | passphrase: Option, 82 | progress_channel: CommandResponseSender, 83 | repair: bool, 84 | ) -> BorgResult { 85 | let repo_path = repo.path(); 86 | let rsh = repo.borg_options()?.rsh.clone(); 87 | let mut extra_args = vec![]; 88 | if repair { 89 | extra_args.push("--repair"); 90 | } 91 | let mut process = tokio::process::Command::new("borg") 92 | .stdin(Stdio::null()) 93 | .stdout(Stdio::piped()) 94 | .stderr(Stdio::piped()) 95 | .env( 96 | "BORG_PASSPHRASE", 97 | passphrase.map(|p| p.inner()).unwrap_or_default(), 98 | ) 99 | .env("BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "YES") 100 | .args( 101 | rsh.map(|r| vec!["--rsh".to_string(), r]) 102 | .unwrap_or_default(), 103 | ) 104 | .arg("--progress") 105 | .arg("--log-json") 106 | .arg("check") 107 | .args(extra_args) 108 | .arg(repo_path.clone()) 109 | .spawn()?; 110 | 111 | if let Some(reader) = process.stderr.take() { 112 | let progress_channel_clone = progress_channel.clone(); 113 | let repo_loc = repo.path(); 114 | tokio::spawn(async move { 115 | use tokio::io::AsyncBufReadExt; 116 | let bb = tokio::io::BufReader::new(reader); 117 | let mut lines = bb.lines(); 118 | while let Ok(Some(line)) = lines.next_line().await { 119 | let msg = line 120 | .parse::() 121 | .ok() 122 | .and_then(|jj| jj.get("message").cloned()); 123 | if let Some(msg) = msg { 124 | let msg = format!("{}", msg); 125 | send_check_progress!(progress_channel_clone, repo_loc.clone(), msg); 126 | } 127 | } 128 | }); 129 | } 130 | 131 | let exit = process.wait().await?; 132 | if !exit.success() { 133 | let err = format!("Borg check failed for {repo_path}"); 134 | send_check_complete!(progress_channel, repo_path, Some(err)); 135 | } else { 136 | send_check_complete!(progress_channel, repo_path, None); 137 | send_info!( 138 | progress_channel, 139 | format!("Verification succeeded for repository: {}", repo) 140 | ); 141 | } 142 | Ok(exit.success()) 143 | } 144 | 145 | use super::{backup_provider::BackupProvider, rustic_provider}; 146 | 147 | pub(crate) struct BorgProvider; 148 | 149 | impl BorgProvider {} 150 | 151 | #[async_trait] 152 | impl BackupProvider for BorgProvider { 153 | async fn create_backup( 154 | &self, 155 | archive_name: String, 156 | backup_paths: &[PathBuf], 157 | exclude_patterns: &[String], 158 | exclude_caches: bool, 159 | repo: Repository, 160 | progress_channel: CommandResponseSender, 161 | completion_semaphore: std::sync::Arc, 162 | ) -> BorgResult<()> { 163 | // CreateOptions 164 | let backup_paths = 
backup_paths 165 | .iter() 166 | .map(|path| format!("'{}'", path.to_string_lossy())) 167 | .collect::>(); 168 | 169 | let mut create_option = borgbackup::common::CreateOptions::new( 170 | repo.path(), 171 | archive_name.clone(), 172 | backup_paths.to_vec(), 173 | vec![], 174 | ); 175 | create_option.passphrase = repo.get_passphrase()?.map(|p| p.inner()); 176 | create_option.excludes = exclude_patterns 177 | .iter() 178 | .cloned() 179 | .map(borgbackup::common::Pattern::Shell) 180 | .collect(); 181 | create_option.exclude_caches = exclude_caches; 182 | 183 | // Convert borgs create progress into ours 184 | 185 | let (create_progress_send, mut create_progress_recv) = 186 | tokio::sync::mpsc::channel::(200); 187 | 188 | let common_options = make_common_options(&repo)?; 189 | 190 | let repo_name_clone = repo.path(); 191 | let progress_channel_task = progress_channel.clone(); 192 | tokio::spawn(async move { 193 | take_repo_lock!( 194 | progress_channel_task, 195 | repo, 196 | "A backup is already in progress for {}, waiting..." 197 | ); 198 | send_info!( 199 | progress_channel_task, 200 | format!("Grabbed repo lock, starting the backup for {}", repo) 201 | ); 202 | // TODO: I think the UI doesn't update if you issue two backups in a row 203 | while let Some(progress) = create_progress_recv.recv().await { 204 | let create_progress = BackupCreateProgress { 205 | repository: repo_name_clone.clone(), 206 | create_progress: progress.into(), 207 | }; 208 | if let Err(e) = progress_channel_task 209 | .send(CommandResponse::CreateProgress(create_progress)) 210 | .await 211 | { 212 | tracing::error!("Failed to send CreateProgress update: {}", e); 213 | } 214 | } 215 | }); 216 | 217 | // Actually spawn the borg backup 218 | 219 | let progress_channel_clone = progress_channel.clone(); 220 | let completion_semaphore_clone = completion_semaphore.clone(); 221 | tokio::spawn(async move { 222 | let res = 223 | borg_async::create_progress(&create_option, &common_options, create_progress_send) 224 | .await; 225 | completion_semaphore_clone.add_permits(1); 226 | match res { 227 | Ok(c) => info!( 228 | "Archive created successfully in repo {}: {:?}", 229 | c.repository.location, c.archive.stats 230 | ), 231 | Err(e) => send_error!( 232 | progress_channel_clone, 233 | format!( 234 | "Failed to create archive {} in repo {}: {:?}", 235 | create_option.archive, create_option.repository, e 236 | ) 237 | ), 238 | }; 239 | }); 240 | Ok(()) 241 | } 242 | 243 | async fn list_archives(&self, repo: &Repository) -> BorgResult { 244 | let list_options = borgbackup::common::ListOptions { 245 | repository: repo.path(), 246 | passphrase: repo.get_passphrase()?.map(|p| p.inner()), 247 | }; 248 | let res = borg_async::list(&list_options, &make_common_options(repo)?) 
249 | .await 250 | .map_err(|e| anyhow!("Failed to list archives in repo {}: {:?}", repo.path(), e))?; 251 | Ok(res.into()) 252 | } 253 | async fn init_repo( 254 | &self, 255 | repo_loc: String, 256 | passphrase: Option, 257 | config: RepositoryOptions, 258 | ) -> BorgResult<()> { 259 | let encryption_mode = match passphrase { 260 | Some(passphrase) => EncryptionMode::Repokey(passphrase.inner()), 261 | None => EncryptionMode::None, 262 | }; 263 | let init_options = InitOptions::new(repo_loc, encryption_mode); 264 | borg_async::init( 265 | &init_options, 266 | &CommonOptions { 267 | rsh: config.borg_options()?.rsh.clone(), 268 | ..Default::default() 269 | }, 270 | ) 271 | .await 272 | .map_err(|e| anyhow!("Failed to init repo: {}", e))?; 273 | Ok(()) 274 | } 275 | 276 | async fn mount( 277 | &self, 278 | repo: &Repository, 279 | given_repository_path: String, 280 | mountpoint: PathBuf, 281 | ) -> BorgResult<()> { 282 | if repo.disabled() { 283 | anyhow::bail!("Attempted to mount disabled repo: {}", repo); 284 | } 285 | // See if the path exists, and if not, try to make it 286 | if let Ok(false) = tokio::fs::try_exists(&mountpoint).await { 287 | info!( 288 | "Attempting to create directory for mounting: {}", 289 | mountpoint.to_string_lossy() 290 | ); 291 | tokio::fs::create_dir_all(&mountpoint).await?; 292 | } 293 | // TODO: Check if this is already mounted! 294 | let mount_source = if given_repository_path.contains("::") { 295 | MountSource::Archive { 296 | archive_name: given_repository_path.clone(), 297 | } 298 | } else { 299 | MountSource::Repository { 300 | name: repo.path(), 301 | first_n_archives: None, 302 | last_n_archives: None, 303 | glob_archives: None, 304 | } 305 | }; 306 | let mut mount_options = 307 | MountOptions::new(mount_source, mountpoint.to_string_lossy().to_string()); 308 | mount_options.passphrase = repo.get_passphrase()?.map(|p| p.inner()); 309 | borg_async::mount(&mount_options, &make_common_options(repo)?) 310 | .await 311 | .map_err(|e| anyhow!("Failed to mount repo {}: {}", repo.path(), e))?; 312 | info!( 313 | "Successfully mounted {} at {:?}", 314 | given_repository_path, mountpoint 315 | ); 316 | Ok(()) 317 | } 318 | 319 | // TODO: Figure out unused 320 | #[allow(unused)] 321 | async fn unmount(&self, mountpoint: PathBuf) -> BorgResult<()> { 322 | borg_async::umount( 323 | mountpoint.to_string_lossy().to_string(), 324 | &CommonOptions::default(), 325 | ) 326 | .await 327 | .map_err(|e| anyhow!("Failed to umount path {:?}: {}", mountpoint, e)) 328 | } 329 | 330 | async fn prune( 331 | &self, 332 | repo: &Repository, 333 | prune_options: crate::profiles::PruneOptions, 334 | progress_channel: CommandResponseSender, 335 | ) -> BorgResult<()> { 336 | take_repo_lock!(progress_channel, repo); 337 | let mut compact_options = BorgLibPruneOptions::new(repo.path()); 338 | compact_options.passphrase = repo.get_passphrase()?.map(|p| p.inner()); 339 | compact_options.keep_daily = Some(prune_options.keep_daily); 340 | compact_options.keep_weekly = Some(prune_options.keep_weekly); 341 | compact_options.keep_monthly = Some(prune_options.keep_monthly); 342 | compact_options.keep_yearly = Some(prune_options.keep_yearly); 343 | borg_async::prune(&compact_options, &make_common_options(repo)?) 
344 | .await 345 | .map_err(|e| anyhow!("Failed to prune repo {}: {:?}", repo.path(), e))?; 346 | Ok(()) 347 | } 348 | async fn compact( 349 | &self, 350 | repo: &Repository, 351 | progress_channel: CommandResponseSender, 352 | ) -> BorgResult<()> { 353 | let compact_options = borgbackup::common::CompactOptions { 354 | repository: repo.path(), 355 | }; 356 | take_repo_lock!(progress_channel, repo); 357 | borg_async::compact(&compact_options, &make_common_options(repo)?) 358 | .await 359 | .map_err(|e| anyhow!("Failed to compact repo {}: {:?}", repo.path(), e))?; 360 | Ok(()) 361 | } 362 | async fn check( 363 | &self, 364 | repo: &Repository, 365 | // TODO: Use this 366 | progress_channel: CommandResponseSender, 367 | ) -> BorgResult { 368 | take_repo_lock!(progress_channel, repo); 369 | borg_check(repo, repo.get_passphrase()?, progress_channel, false).await 370 | } 371 | async fn repair( 372 | &self, 373 | repo: &Repository, 374 | progress_channel: CommandResponseSender, 375 | ) -> BorgResult { 376 | take_repo_lock!(progress_channel, repo); 377 | borg_check(repo, repo.get_passphrase()?, progress_channel, true).await 378 | } 379 | } 380 | -------------------------------------------------------------------------------- /src/backends/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod backup_provider; 2 | pub(crate) mod borg_provider; 3 | mod rustic_mount; 4 | pub(crate) mod rustic_provider; 5 | -------------------------------------------------------------------------------- /src/backends/rustic_mount.rs: -------------------------------------------------------------------------------- 1 | // Copied from 2 | // https://github.com/rustic-rs/rustic/blob/main/src/commands/mount/fusefs.rs#L140 3 | // TODO: potentially rewrite 4 | // TODO: Maybe just pull in rustic_rs as a dep? 5 | #[cfg(not(windows))] 6 | use std::os::unix::prelude::OsStrExt; 7 | use std::{ 8 | collections::BTreeMap, 9 | ffi::{CString, OsStr}, 10 | path::Path, 11 | sync::RwLock, 12 | time::{Duration, SystemTime}, 13 | }; 14 | 15 | use rustic_core::{ 16 | repofile::{Node, NodeType}, 17 | vfs::{FilePolicy, OpenFile, Vfs}, 18 | IndexedFull, Repository, 19 | }; 20 | 21 | use fuse_mt::{ 22 | CallbackResult, DirectoryEntry, FileAttr, FileType, FilesystemMT, RequestInfo, ResultData, 23 | ResultEmpty, ResultEntry, ResultOpen, ResultReaddir, ResultSlice, ResultXattr, Xattr, 24 | }; 25 | use itertools::Itertools; 26 | 27 | pub struct FuseFS { 28 | repo: Repository, 29 | vfs: Vfs, 30 | open_files: RwLock>, 31 | now: SystemTime, 32 | file_policy: FilePolicy, 33 | } 34 | 35 | impl FuseFS { 36 | pub(crate) fn new(repo: Repository, vfs: Vfs, file_policy: FilePolicy) -> Self { 37 | let open_files = RwLock::new(BTreeMap::new()); 38 | 39 | Self { 40 | repo, 41 | vfs, 42 | open_files, 43 | now: SystemTime::now(), 44 | file_policy, 45 | } 46 | } 47 | 48 | fn node_from_path(&self, path: &Path) -> Result { 49 | self.vfs 50 | .node_from_path(&self.repo, path) 51 | .map_err(|_| libc::ENOENT) 52 | } 53 | 54 | fn dir_entries_from_path(&self, path: &Path) -> Result, i32> { 55 | self.vfs 56 | .dir_entries_from_path(&self.repo, path) 57 | .map_err(|_| libc::ENOENT) 58 | } 59 | } 60 | 61 | fn node_to_filetype(node: &Node) -> FileType { 62 | match node.node_type { 63 | NodeType::File => FileType::RegularFile, 64 | NodeType::Dir => FileType::Directory, 65 | NodeType::Symlink { .. } => FileType::Symlink, 66 | NodeType::Chardev { .. } => FileType::CharDevice, 67 | NodeType::Dev { .. 
} => FileType::BlockDevice, 68 | NodeType::Fifo => FileType::NamedPipe, 69 | NodeType::Socket => FileType::Socket, 70 | } 71 | } 72 | 73 | fn node_type_to_rdev(tpe: &NodeType) -> u32 { 74 | u32::try_from(match tpe { 75 | NodeType::Dev { device } | NodeType::Chardev { device } => *device, 76 | _ => 0, 77 | }) 78 | .unwrap() 79 | } 80 | 81 | fn node_to_linktarget(node: &Node) -> Option<&OsStr> { 82 | if node.is_symlink() { 83 | Some(node.node_type.to_link().as_os_str()) 84 | } else { 85 | None 86 | } 87 | } 88 | 89 | fn node_to_file_attr(node: &Node, now: SystemTime) -> FileAttr { 90 | FileAttr { 91 | // Size in bytes 92 | size: node.meta.size, 93 | // Size in blocks 94 | blocks: 0, 95 | // Time of last access 96 | atime: node.meta.atime.map(SystemTime::from).unwrap_or(now), 97 | // Time of last modification 98 | mtime: node.meta.mtime.map(SystemTime::from).unwrap_or(now), 99 | // Time of last metadata change 100 | ctime: node.meta.ctime.map(SystemTime::from).unwrap_or(now), 101 | // Time of creation (macOS only) 102 | crtime: now, 103 | // Kind of file (directory, file, pipe, etc.) 104 | kind: node_to_filetype(node), 105 | // Permissions 106 | perm: node.meta.mode.unwrap_or(0o755) as u16, 107 | // Number of hard links 108 | nlink: node.meta.links.try_into().unwrap_or(1), 109 | // User ID 110 | uid: node.meta.uid.unwrap_or(0), 111 | // Group ID 112 | gid: node.meta.gid.unwrap_or(0), 113 | // Device ID (if special file) 114 | rdev: node_type_to_rdev(&node.node_type), 115 | // Flags (macOS only; see chflags(2)) 116 | flags: 0, 117 | } 118 | } 119 | 120 | impl FilesystemMT for FuseFS { 121 | fn getattr(&self, _req: RequestInfo, path: &Path, _fh: Option) -> ResultEntry { 122 | let node = self.node_from_path(path)?; 123 | Ok((Duration::from_secs(1), node_to_file_attr(&node, self.now))) 124 | } 125 | 126 | #[cfg(not(windows))] 127 | fn readlink(&self, _req: RequestInfo, path: &Path) -> ResultData { 128 | let target = node_to_linktarget(&self.node_from_path(path)?) 129 | .ok_or(libc::ENOSYS)? 
130 | .as_bytes() 131 | .to_vec(); 132 | 133 | Ok(target) 134 | } 135 | 136 | fn open(&self, _req: RequestInfo, path: &Path, _flags: u32) -> ResultOpen { 137 | if matches!(self.file_policy, FilePolicy::Forbidden) { 138 | return Err(libc::ENOTSUP); 139 | } 140 | let node = self.node_from_path(path)?; 141 | let open = self.repo.open_file(&node).map_err(|_| libc::ENOSYS)?; 142 | let fh = { 143 | let mut open_files = self.open_files.write().unwrap(); 144 | let fh = open_files.last_key_value().map_or(0, |(fh, _)| *fh + 1); 145 | _ = open_files.insert(fh, open); 146 | fh 147 | }; 148 | Ok((fh, 0)) 149 | } 150 | 151 | fn release( 152 | &self, 153 | _req: RequestInfo, 154 | _path: &Path, 155 | fh: u64, 156 | _flags: u32, 157 | _lock_owner: u64, 158 | _flush: bool, 159 | ) -> ResultEmpty { 160 | _ = self.open_files.write().unwrap().remove(&fh); 161 | Ok(()) 162 | } 163 | 164 | fn read( 165 | &self, 166 | _req: RequestInfo, 167 | _path: &Path, 168 | fh: u64, 169 | offset: u64, 170 | size: u32, 171 | callback: impl FnOnce(ResultSlice<'_>) -> CallbackResult, 172 | ) -> CallbackResult { 173 | if let Some(open_file) = self.open_files.read().unwrap().get(&fh) { 174 | if let Ok(data) = 175 | self.repo 176 | .read_file_at(open_file, offset.try_into().unwrap(), size as usize) 177 | { 178 | return callback(Ok(&data)); 179 | } 180 | } 181 | callback(Err(libc::ENOSYS)) 182 | } 183 | 184 | fn opendir(&self, _req: RequestInfo, _path: &Path, _flags: u32) -> ResultOpen { 185 | Ok((0, 0)) 186 | } 187 | 188 | fn readdir(&self, _req: RequestInfo, path: &Path, _fh: u64) -> ResultReaddir { 189 | let nodes = self.dir_entries_from_path(path)?; 190 | 191 | let result = nodes 192 | .into_iter() 193 | .map(|node| DirectoryEntry { 194 | name: node.name(), 195 | kind: node_to_filetype(&node), 196 | }) 197 | .collect(); 198 | Ok(result) 199 | } 200 | 201 | fn releasedir(&self, _req: RequestInfo, _path: &Path, _fh: u64, _flags: u32) -> ResultEmpty { 202 | Ok(()) 203 | } 204 | 205 | fn listxattr(&self, _req: RequestInfo, path: &Path, size: u32) -> ResultXattr { 206 | let node = self.node_from_path(path)?; 207 | let xattrs = node 208 | .meta 209 | .extended_attributes 210 | .into_iter() 211 | // convert into null-terminated [u8] 212 | .map(|a| CString::new(a.name).unwrap().into_bytes_with_nul()) 213 | .concat(); 214 | 215 | if size == 0 { 216 | Ok(Xattr::Size(u32::try_from(xattrs.len()).unwrap())) 217 | } else { 218 | Ok(Xattr::Data(xattrs)) 219 | } 220 | } 221 | 222 | fn getxattr(&self, _req: RequestInfo, path: &Path, name: &OsStr, size: u32) -> ResultXattr { 223 | let node = self.node_from_path(path)?; 224 | match node 225 | .meta 226 | .extended_attributes 227 | .into_iter() 228 | .find(|a| name == OsStr::new(&a.name)) 229 | { 230 | None => Err(libc::ENOSYS), 231 | Some(attr) => { 232 | let value = attr.value.unwrap_or_default(); 233 | if size == 0 { 234 | Ok(Xattr::Size(u32::try_from(value.len()).unwrap())) 235 | } else { 236 | Ok(Xattr::Data(value)) 237 | } 238 | } 239 | } 240 | } 241 | } 242 | 243 | // async fn mount( 244 | // repo: &Repository, 245 | // given_repository_path: String, 246 | // mountpoint: PathBuf, 247 | // ) -> BorgResult<()> { 248 | // todo!() 249 | // } 250 | 251 | // async fn unmount(mountpoint: PathBuf) -> BorgResult<()> { 252 | // todo!() 253 | // } 254 | -------------------------------------------------------------------------------- /src/backends/rustic_provider.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | path::PathBuf, 3 | sync::{ 4 | 
atomic::{AtomicBool, AtomicU64}, 5 | Arc, RwLock, 6 | }, 7 | }; 8 | 9 | use async_trait::async_trait; 10 | use fuse_mt::FuseMT; 11 | use rustic_core::repofile::SnapshotFile; 12 | use rustic_core::vfs::{FilePolicy, IdenticalSnapshot, Latest, Vfs}; 13 | use tokio::sync::Semaphore; 14 | 15 | use crate::{ 16 | backends::rustic_mount::FuseFS, 17 | borgtui::CommandResponse, 18 | profiles::{Passphrase, PruneOptions, Repository}, 19 | types::{ 20 | send_check_complete, send_error, send_info, take_repo_lock, Archive, BackupCreateProgress, 21 | BackupCreationProgress, BorgResult, CheckProgress, CommandResponseSender, PrettyBytes, 22 | RepositoryArchives, 23 | }, 24 | RepositoryOptions, 25 | }; 26 | 27 | use super::backup_provider::BackupProvider; 28 | 29 | const RESTIC_PASSPHRASE_REQUIRED: &str = "Restic Repositories require a password! Please check your configuration using `borgtui config-path`"; 30 | 31 | fn passphrase_from_repo(repo: &Repository) -> BorgResult { 32 | repo.get_passphrase()? 33 | .ok_or_else(|| anyhow::anyhow!(RESTIC_PASSPHRASE_REQUIRED)) 34 | } 35 | 36 | #[derive(Debug, Clone)] 37 | enum ProgressEmitterKind { 38 | Backup, 39 | Info, 40 | Check, 41 | } 42 | 43 | #[derive(Clone, Debug)] 44 | struct ProgressEmitter { 45 | sender: CommandResponseSender, 46 | prefix: String, 47 | repo_path: String, 48 | title: Arc>, 49 | length: Arc, 50 | counter: Arc, 51 | last_sent_counter: Arc, 52 | last_time_sent: Arc>, 53 | force_emit_now: Arc, 54 | hidden: bool, 55 | kind: ProgressEmitterKind, 56 | } 57 | 58 | impl ProgressEmitter { 59 | fn create_backup(sender: CommandResponseSender, repo_path: String) -> Self { 60 | // TODO: fix ugly time 61 | let last_time_sent = std::time::Instant::now() 62 | .checked_sub(std::time::Duration::from_secs(1)) 63 | .unwrap(); 64 | Self { 65 | sender, 66 | prefix: String::new(), 67 | repo_path, 68 | title: Arc::new(RwLock::new(String::new())), 69 | length: Arc::new(AtomicU64::new(0)), 70 | counter: Arc::new(AtomicU64::new(0)), 71 | last_sent_counter: Arc::new(AtomicU64::new(0)), 72 | last_time_sent: Arc::new(RwLock::new(last_time_sent)), 73 | force_emit_now: Arc::new(AtomicBool::new(false)), 74 | hidden: false, 75 | kind: ProgressEmitterKind::Backup, 76 | } 77 | } 78 | 79 | fn info(sender: CommandResponseSender, repo_path: String) -> Self { 80 | let mut ss = Self::create_backup(sender, repo_path); 81 | ss.kind = ProgressEmitterKind::Info; 82 | ss 83 | } 84 | 85 | fn check(sender: CommandResponseSender, repo_path: String) -> Self { 86 | let mut ss = Self::create_backup(sender, repo_path); 87 | ss.kind = ProgressEmitterKind::Check; 88 | ss 89 | } 90 | 91 | fn with_prefix(&self, prefix: String) -> Self { 92 | let mut ss = self.clone(); 93 | ss.prefix = prefix; 94 | ss 95 | } 96 | 97 | fn send_check_progress(&self) { 98 | let counter = self.counter.load(std::sync::atomic::Ordering::SeqCst); 99 | self.last_sent_counter 100 | .store(counter, std::sync::atomic::Ordering::SeqCst); 101 | let msg = format!( 102 | "{} {} / {}", 103 | &self.prefix, 104 | PrettyBytes(counter), 105 | PrettyBytes(self.length.load(std::sync::atomic::Ordering::SeqCst)) 106 | ); 107 | let msg = CommandResponse::CheckProgress(CheckProgress::new(self.repo_path.clone(), msg)); 108 | if let Err(e) = self.sender.blocking_send(msg) { 109 | tracing::error!("Failed to send check message: {e}"); 110 | } 111 | } 112 | 113 | fn send_info_progress(&self) { 114 | let counter = self.counter.load(std::sync::atomic::Ordering::SeqCst); 115 | self.last_sent_counter 116 | .store(counter, 
std::sync::atomic::Ordering::SeqCst); 117 | let msg = format!( 118 | "[{}] {} {} / {}", 119 | &self.repo_path, 120 | &self.prefix, 121 | PrettyBytes(counter), 122 | PrettyBytes(self.length.load(std::sync::atomic::Ordering::SeqCst)) 123 | ); 124 | let msg = CommandResponse::Info(msg); 125 | if let Err(e) = self.sender.blocking_send(msg) { 126 | tracing::error!("Failed to send info message: {e}"); 127 | } 128 | } 129 | 130 | fn send_create_progress(&self) { 131 | let byte_counter = self.counter.load(std::sync::atomic::Ordering::SeqCst); 132 | self.last_sent_counter 133 | .store(byte_counter, std::sync::atomic::Ordering::SeqCst); 134 | let total_size = self.length.load(std::sync::atomic::Ordering::SeqCst); 135 | let msg = format!( 136 | "{}: {} - {} / {}", 137 | &self.prefix, 138 | &*self.title.read().unwrap(), 139 | PrettyBytes(byte_counter), 140 | PrettyBytes(total_size) 141 | ); 142 | let progress = BackupCreationProgress::InProgress { 143 | original_size: total_size, 144 | compressed_size: byte_counter, 145 | deduplicated_size: byte_counter, 146 | num_files: 0, 147 | current_path: msg, 148 | }; 149 | let create_progress = BackupCreateProgress::new(self.repo_path.clone(), progress); 150 | let msg = CommandResponse::CreateProgress(create_progress); 151 | if let Err(e) = self.sender.blocking_send(msg) { 152 | tracing::error!("Failed to send create message: {e}"); 153 | } 154 | } 155 | 156 | fn maybe_send_backup_create_finished(&self) { 157 | // Actually finish 158 | if matches!(self.kind, ProgressEmitterKind::Backup) { 159 | let finished = BackupCreateProgress::finished(self.repo_path.clone()); 160 | let msg = CommandResponse::CreateProgress(finished); 161 | if let Err(e) = self.sender.blocking_send(msg) { 162 | tracing::error!("Failed to send finish message: {e}"); 163 | } 164 | } 165 | } 166 | 167 | fn send_message(&self) { 168 | let mut last_time_guard = self.last_time_sent.write().unwrap(); 169 | *last_time_guard = std::time::Instant::now(); 170 | match self.kind { 171 | ProgressEmitterKind::Backup => self.send_create_progress(), 172 | ProgressEmitterKind::Info => self.send_info_progress(), 173 | ProgressEmitterKind::Check => self.send_check_progress(), 174 | } 175 | } 176 | } 177 | 178 | const SUBSTANTIAL_CHANGE_THRESHOLD: f64 = 0.10; 179 | const SUBSTANTIAL_CHANGE_THRESHOLD_TIME: std::time::Duration = 180 | std::time::Duration::from_millis(200); 181 | 182 | impl rustic_core::Progress for ProgressEmitter { 183 | fn is_hidden(&self) -> bool { 184 | self.hidden 185 | } 186 | 187 | fn set_length(&self, len: u64) { 188 | self.length.store(len, std::sync::atomic::Ordering::SeqCst); 189 | } 190 | 191 | fn set_title(&self, title: &'static str) { 192 | let mut guard = self.title.write().unwrap(); 193 | *guard = title.to_string(); 194 | self.force_emit_now 195 | .store(true, std::sync::atomic::Ordering::SeqCst) 196 | } 197 | 198 | fn inc(&self, inc: u64) { 199 | let old = self 200 | .counter 201 | .fetch_add(inc, std::sync::atomic::Ordering::SeqCst); 202 | let new_value = old + inc; 203 | let last_sent_size = self 204 | .last_sent_counter 205 | .load(std::sync::atomic::Ordering::SeqCst); 206 | let is_substantial_change = 207 | (1.0 - last_sent_size as f64 / (new_value as f64)) >= SUBSTANTIAL_CHANGE_THRESHOLD; 208 | let is_substantial_time_diff = (std::time::Instant::now() 209 | - *self.last_time_sent.read().unwrap()) 210 | >= SUBSTANTIAL_CHANGE_THRESHOLD_TIME; 211 | let force_emit_now = self 212 | .force_emit_now 213 | .load(std::sync::atomic::Ordering::SeqCst); 214 | if is_substantial_change || 
is_substantial_time_diff || force_emit_now { 215 | self.send_message(); 216 | } 217 | } 218 | 219 | fn finish(&self) { 220 | if self.hidden { 221 | return; 222 | } 223 | // Ensure the last entry is always sent 224 | self.send_message(); 225 | } 226 | } 227 | 228 | impl rustic_core::ProgressBars for ProgressEmitter { 229 | type P = ProgressEmitter; 230 | 231 | fn progress_hidden(&self) -> Self::P { 232 | self.clone() 233 | } 234 | 235 | fn progress_spinner(&self, prefix: impl Into>) -> Self::P { 236 | self.with_prefix(prefix.into().to_string()) 237 | } 238 | 239 | fn progress_counter(&self, prefix: impl Into>) -> Self::P { 240 | self.with_prefix(prefix.into().to_string()) 241 | } 242 | 243 | fn progress_bytes(&self, prefix: impl Into>) -> Self::P { 244 | self.with_prefix(prefix.into().to_string()) 245 | } 246 | } 247 | 248 | fn rustic_cache_dir() -> Option { 249 | dirs::cache_dir().map(|mut p| { 250 | p.push("borgtui"); 251 | p.push("rustic"); 252 | p 253 | }) 254 | } 255 | 256 | pub(crate) struct RusticProvider; 257 | 258 | #[async_trait] 259 | impl BackupProvider for RusticProvider { 260 | async fn create_backup( 261 | &self, 262 | archive_name: String, 263 | backup_paths: &[PathBuf], 264 | exclude_patterns: &[String], 265 | exclude_caches: bool, 266 | repo: Repository, 267 | progress_channel: CommandResponseSender, 268 | completion_semaphore: Arc, 269 | ) -> BorgResult<()> { 270 | let backup_paths: Vec<_> = backup_paths 271 | .iter() 272 | .map(|bp| bp.to_string_lossy().to_string()) 273 | .collect(); 274 | 275 | let mut filter_opts = rustic_core::LocalSourceFilterOptions::default(); 276 | if exclude_caches { 277 | filter_opts = filter_opts.exclude_if_present(["CACHEDIR.TAG".to_string()]); 278 | } 279 | filter_opts = filter_opts.globs( 280 | exclude_patterns 281 | .iter() 282 | .map(|exclude_pattern| format!("!{exclude_pattern}")) 283 | .collect::>(), 284 | ); 285 | 286 | let fully_qualified_name = format!("{}::{}", repo.path(), &archive_name); 287 | send_info!( 288 | progress_channel, 289 | format!("Starting rustic backup of {fully_qualified_name}") 290 | ); 291 | let pb = ProgressEmitter::create_backup(progress_channel.clone(), repo.path()); 292 | let handle = tokio::task::spawn_blocking(move || -> BorgResult<()> { 293 | // Backend 294 | let repo_loc = repo.path(); 295 | let backends = rustic_backend::BackendOptions::default() 296 | .repository(repo_loc) 297 | .to_backends()?; 298 | 299 | // Passphrase 300 | let passphrase = passphrase_from_repo(&repo)?; 301 | // Actually open the connection 302 | let mut repo_opts = 303 | rustic_core::RepositoryOptions::default().password(passphrase.inner()); 304 | if let Some(cache_dir) = rustic_cache_dir() { 305 | repo_opts = repo_opts.cache_dir(cache_dir); 306 | repo_opts.no_cache = false; 307 | } 308 | let rustic_repo = 309 | rustic_core::Repository::new_with_progress(&repo_opts, &backends, pb.clone())? 310 | .open()? 311 | .to_indexed_ids()?; 312 | let backup_opts = rustic_core::BackupOptions::default().ignore_filter_opts(filter_opts); 313 | let sources = rustic_core::PathList::from_iter(backup_paths); 314 | let mut snap = rustic_core::SnapshotOptions::default() 315 | .add_tags("borgtui")? 316 | .to_snapshot()?; 317 | snap.label = archive_name; 318 | let snap = rustic_repo.backup(&backup_opts, &sources, snap)?; 319 | tracing::info!("Snapshot taken! 
{}", snap.label); 320 | pb.maybe_send_backup_create_finished(); 321 | Ok(()) 322 | }); 323 | 324 | tokio::spawn(async move { 325 | match handle.await { 326 | Ok(Ok(_)) => { 327 | send_info!( 328 | progress_channel, 329 | format!("Completed rustic backup for {}", fully_qualified_name) 330 | ); 331 | } 332 | Ok(Err(e)) => send_error!(progress_channel, format!("Rustic backup failed: {e}")), 333 | Err(e) => send_error!( 334 | progress_channel, 335 | format!("Failed to spawn thread for Rustic backup: {e}") 336 | ), 337 | } 338 | completion_semaphore.add_permits(1); 339 | }); 340 | Ok(()) 341 | } 342 | async fn list_archives(&self, repo: &Repository) -> BorgResult { 343 | let repo_loc = repo.path(); 344 | let passphrase = repo.get_passphrase()?; 345 | let backends = rustic_backend::BackendOptions::default() 346 | .repository(&repo_loc) 347 | .to_backends()?; 348 | 349 | let mut repo_opts = rustic_core::RepositoryOptions::default(); 350 | if let Some(passphrase) = passphrase { 351 | repo_opts = repo_opts.password(passphrase.inner()) 352 | } 353 | let res = tokio::task::spawn_blocking(move || -> BorgResult { 354 | let snapshots = rustic_core::Repository::new(&repo_opts, &backends)? 355 | .open()? 356 | .get_all_snapshots()?; 357 | let mut archives: Vec = snapshots 358 | .iter() 359 | .map(|snapshot| Archive { 360 | name: snapshot.label.clone(), 361 | creation_date: snapshot.time.date_naive().into(), 362 | }) 363 | .collect(); 364 | // Sort so the most recent archive is the last (borg behaviour) 365 | archives.sort_by(|left, right| { 366 | left.creation_date 367 | .partial_cmp(&right.creation_date) 368 | .unwrap() 369 | }); 370 | Ok(RepositoryArchives::new(repo_loc, archives)) 371 | }) 372 | // TODO: This one actually blocks? 373 | .await??; 374 | Ok(res) 375 | } 376 | 377 | async fn init_repo( 378 | &self, 379 | repo_loc: String, 380 | passphrase: Option, 381 | _config: RepositoryOptions, 382 | ) -> BorgResult<()> { 383 | let passphrase = match passphrase { 384 | Some(passphrase) => passphrase, 385 | None => anyhow::bail!( 386 | "Restic repositories require a password. Please provide one. See `borgtui init -h`." 
387 | ), 388 | }; 389 | let backends = rustic_backend::BackendOptions::default() 390 | .repository(&repo_loc) 391 | .to_backends()?; 392 | 393 | let repo_opts = rustic_core::RepositoryOptions::default().password(passphrase.inner()); 394 | let key_opts = rustic_core::KeyOptions::default(); 395 | let config_opts = rustic_core::ConfigOptions::default(); 396 | tracing::info!("Initializing rustic repo: {repo_loc}"); 397 | rustic_core::Repository::new(&repo_opts, &backends)?.init(&key_opts, &config_opts)?; 398 | tracing::info!("Successfully initialized rustic repo: {repo_loc}"); 399 | Ok(()) 400 | } 401 | 402 | async fn mount( 403 | &self, 404 | repo: &Repository, 405 | // TODO: support mounting particular snapshots 406 | given_repository_path: String, 407 | mountpoint: PathBuf, 408 | ) -> BorgResult<()> { 409 | if repo.disabled() { 410 | anyhow::bail!("Attempted to mount disabled repo: {}", repo); 411 | } 412 | // See if the path exists, and if not, try to make it 413 | if let Ok(false) = tokio::fs::try_exists(&mountpoint).await { 414 | tracing::info!( 415 | "Attempting to create directory for mounting: {}", 416 | mountpoint.to_string_lossy() 417 | ); 418 | tokio::fs::create_dir_all(&mountpoint).await?; 419 | } 420 | 421 | let passphrase = passphrase_from_repo(repo)?; 422 | let repo_loc = repo.path(); 423 | let backends = rustic_backend::BackendOptions::default() 424 | .repository(&repo_loc) 425 | .to_backends()?; 426 | 427 | let repo_opts = rustic_core::RepositoryOptions::default().password(passphrase.inner()); 428 | 429 | // TODO: do something with this join handle 430 | let _join_handle = tokio::task::spawn_blocking(move || -> BorgResult<()> { 431 | let rustic_repo = rustic_core::Repository::new(&repo_opts, &backends)?.open()?; 432 | 433 | let (_repo_path, snapshot_label) = 434 | given_repository_path.split_once("::").unwrap_or_default(); 435 | 436 | let sn_filter = |sn: &SnapshotFile| { 437 | if snapshot_label.is_empty() { 438 | return true; 439 | } 440 | sn.label == snapshot_label 441 | }; 442 | 443 | let vfs = Vfs::from_snapshots( 444 | rustic_repo.get_matching_snapshots(sn_filter)?, 445 | "[{hostname}]/[{label}]", 446 | // TODO: Make this a borgtui constant (we use it in the archive name) 447 | "%Y-%m-%d:%H:%M:%S", 448 | Latest::AsLink, 449 | IdenticalSnapshot::AsLink, 450 | )?; 451 | let file_policy = FilePolicy::Read; // TODO: I should probably be smarter here 452 | 453 | tracing::info!( 454 | "Mounting rustic repo: {} at {}", 455 | repo_loc, 456 | mountpoint.to_string_lossy(), 457 | ); 458 | tracing::info!("BorgTUI will block until the filesystem is unmounted."); 459 | tracing::info!("Make sure to run `umount {mountpoint:?}` to unmount the filesystem once you are done."); 460 | let fuse_mt = FuseMT::new(FuseFS::new(rustic_repo.to_indexed()?, vfs, file_policy), 1); 461 | fuse_mt::mount(fuse_mt, &mountpoint, &[])?; 462 | Ok(()) 463 | }); 464 | Ok(()) 465 | } 466 | 467 | // TODO: Wire unmounting in with repositories 468 | async fn unmount(&self, mountpoint: PathBuf) -> BorgResult<()> { 469 | let exit = tokio::process::Command::new("umount") 470 | .arg(mountpoint) 471 | .spawn()? 
472 | .wait() 473 | .await?; 474 | tracing::info!("umount finished with exitcode {exit:?}"); 475 | Ok(()) 476 | } 477 | 478 | async fn prune( 479 | &self, 480 | repo: &Repository, 481 | prune_options: PruneOptions, 482 | progress_channel: CommandResponseSender, 483 | ) -> BorgResult<()> { 484 | take_repo_lock!(progress_channel, repo); 485 | 486 | let repo_loc = repo.path(); 487 | 488 | let backends = rustic_backend::BackendOptions::default() 489 | .repository(&repo_loc) 490 | .to_backends()?; 491 | let passphrase = passphrase_from_repo(repo)?; 492 | 493 | let pb = ProgressEmitter::info(progress_channel.clone(), repo_loc.clone()); 494 | let handle = tokio::task::spawn_blocking(move || { 495 | // Actually open the connection 496 | let repo_opts = rustic_core::RepositoryOptions::default().password(passphrase.inner()); 497 | let rustic_repo = 498 | rustic_core::Repository::new_with_progress(&repo_opts, &backends, pb)?.open()?; 499 | let keep_options = rustic_core::KeepOptions::default() 500 | .keep_daily(prune_options.keep_daily.get() as i32) 501 | .keep_weekly(prune_options.keep_weekly.get() as i32) 502 | .keep_monthly(prune_options.keep_monthly.get() as i32) 503 | .keep_yearly(prune_options.keep_yearly.get() as i32); 504 | let forget_ids = rustic_repo 505 | .get_forget_snapshots( 506 | &keep_options, 507 | rustic_core::SnapshotGroupCriterion::default() 508 | .hostname(false) 509 | .label(false) 510 | .paths(false) 511 | .tags(false), 512 | |_| true, 513 | )? 514 | .into_forget_ids(); 515 | // TODO: use send_info_blocking (and write that macro) 516 | tracing::info!( 517 | "Removing {} rustic snapshots in {}.", 518 | forget_ids.len(), 519 | repo_loc, 520 | ); 521 | rustic_repo.delete_snapshots(&forget_ids)?; 522 | let prune_opts = rustic_core::PruneOptions::default().ignore_snaps(forget_ids); 523 | let prune_plan = rustic_repo.prune_plan(&prune_opts)?; 524 | // TODO: use send_info_blocking 525 | tracing::info!("Pruning {}...", repo_loc); 526 | rustic_repo.prune(&prune_opts, prune_plan)?; 527 | Ok::<(), anyhow::Error>(()) 528 | }); 529 | let repo_loc_clone = repo.path(); 530 | tokio::spawn(async move { 531 | match handle.await { 532 | Ok(Ok(_)) => send_info!( 533 | progress_channel, 534 | format!("Successfully pruned {repo_loc_clone}") 535 | ), 536 | Ok(Err(e)) => send_error!(progress_channel, format!("Rustic prune failed: {e}")), 537 | Err(e) => send_error!(progress_channel, format!("Failed to spawn thread: {e}")), 538 | } 539 | }); 540 | Ok(()) 541 | } 542 | async fn compact( 543 | &self, 544 | _repo: &Repository, 545 | _progress_channel: CommandResponseSender, 546 | ) -> BorgResult<()> { 547 | tracing::warn!( 548 | "BorgTUI's implementation of Rustic repositories automatically compact when pruning!" 
549 | ); 550 | Ok(()) 551 | } 552 | 553 | async fn check( 554 | &self, 555 | repo: &Repository, 556 | progress_channel: CommandResponseSender, 557 | ) -> BorgResult { 558 | take_repo_lock!(progress_channel, repo); 559 | let repo_loc = repo.path(); 560 | 561 | let backends = rustic_backend::BackendOptions::default() 562 | .repository(&repo_loc) 563 | .to_backends()?; 564 | let passphrase = passphrase_from_repo(repo)?; 565 | 566 | let progress_channel_clone = progress_channel.clone(); 567 | let res = tokio::task::spawn_blocking(move || { 568 | let pb = ProgressEmitter::check(progress_channel_clone, repo_loc.clone()); 569 | let repo_opts = rustic_core::RepositoryOptions::default().password(passphrase.inner()); 570 | let rustic_repo = 571 | rustic_core::Repository::new_with_progress(&repo_opts, &backends, pb)?.open()?; 572 | let check_options = rustic_core::CheckOptions::default(); 573 | rustic_repo.check(check_options) 574 | }) 575 | .await?; 576 | match res { 577 | Ok(_) => { 578 | send_check_complete!(progress_channel, repo.path(), None); 579 | send_info!( 580 | progress_channel, 581 | format!("Verification succeeded for repository: {repo}") 582 | ); 583 | Ok(true) 584 | } 585 | Err(e) => { 586 | let err_msg = format!("Rustic check failed: {e}"); 587 | send_check_complete!(progress_channel, repo.path(), Some(err_msg.clone())); 588 | send_error!(progress_channel, err_msg.clone()); 589 | Ok(false) 590 | } 591 | } 592 | } 593 | async fn repair( 594 | &self, 595 | _repo: &Repository, 596 | _progress_channel: CommandResponseSender, 597 | ) -> BorgResult { 598 | // TODO: Implement this! 599 | anyhow::bail!("Unimplemented") 600 | } 601 | } 602 | -------------------------------------------------------------------------------- /src/cli.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use async_recursion::async_recursion; 4 | use clap::{CommandFactory, Parser, Subcommand}; 5 | use clap_complete::{generate, shells}; 6 | use tokio::io::AsyncWriteExt; 7 | 8 | use crate::{ 9 | profiles::{Passphrase, RepositoryKind}, 10 | types::BorgResult, 11 | }; 12 | 13 | const ABOUT: &str = "A TUI and CLI to help automate borg (and rustic) backups :^)"; 14 | 15 | #[derive(Parser, Debug, Clone)] 16 | #[command( 17 | author = "David Briggs ", 18 | version, 19 | about = ABOUT, 20 | long_about = ABOUT 21 | )] 22 | pub(crate) struct Args { 23 | #[command(subcommand)] 24 | pub(crate) action: Option, 25 | 26 | /// The profile to use. If not specified, the default profile 27 | /// will be used. 28 | #[arg(global = true, env, short = 'p', long = "profile")] 29 | pub(crate) borgtui_profile: Option, 30 | 31 | /// Watch for changes in the profile and automatically reload on modify. 32 | /// This feature is experimental. 33 | #[arg(short, long)] 34 | pub(crate) watch_profile: bool, 35 | } 36 | 37 | #[derive(Parser, Debug, Clone)] 38 | pub(crate) struct PassphraseSource { 39 | /// Use a keyfile (a file path containing the repo passphrase) 40 | #[arg(short, long)] 41 | pub(crate) keyfile: Option, 42 | /// Store the borg passphrase in the config file. 43 | #[arg(short = 'c', long)] 44 | pub(crate) raw: bool, 45 | /// No borg passphrase associated with this repository. 46 | /// 47 | /// Implied if no other borg passphrase sources are specified. 48 | #[arg(short, long)] 49 | pub(crate) none: bool, 50 | /// Obtain the borg passphrase from the environment. 
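/// Specifically, clap's `#[arg(env)]` reads the `BORG_PASSPHRASE` environment variable (the field name upper-cased).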
51 | #[arg(env)] 52 | pub(crate) borg_passphrase: Option, 53 | } 54 | 55 | impl PassphraseSource { 56 | pub(crate) fn get_passphrase(&self) -> BorgResult> { 57 | if self.none { 58 | return Ok(None); 59 | } 60 | if let Some(borg_passphrase) = &self.borg_passphrase { 61 | Ok(Some(borg_passphrase.clone())) 62 | } else if let Some(keyfile) = &self.keyfile { 63 | let passphrase = std::fs::read_to_string(keyfile)?.trim().to_string(); 64 | Ok(Some(passphrase.into())) 65 | } else { 66 | Ok(None) 67 | } 68 | } 69 | } 70 | 71 | #[derive(Subcommand, Debug, Clone)] 72 | pub(crate) enum Action { 73 | /// Initialize a new borg repository and add it to a profile 74 | Init { 75 | /// SSH command to use when connecting to the repo 76 | #[arg(short, long)] 77 | rsh: Option, 78 | 79 | /// The repo location. It should be a file path or ssh string. 80 | /// 81 | /// Examples: 82 | /// - /hdd3/NewBackup 83 | /// - /hdd2/NewBackup 84 | location: String, 85 | 86 | /// Repository kind ("borg" or "rustic"). 87 | #[arg(long, default_value = "borg")] 88 | kind: RepositoryKind, 89 | 90 | #[command(flatten)] 91 | passphrase_loc: PassphraseSource, 92 | }, 93 | /// Create a new backup 94 | Create, 95 | /// Add a directory to the profile to backup 96 | Add { 97 | /// The directory or file path to add to backup 98 | directory: PathBuf, 99 | }, 100 | /// Remove a directory from a profile (will no longer be backed up) 101 | Remove { 102 | /// The directory or file path to add to backup 103 | directory: PathBuf, 104 | }, 105 | /// Add an existing repository to the profile. 106 | /// 107 | /// It's recommended to set BORG_PASSPHRASE in your environment and export it. 108 | AddRepo { 109 | /// The directory or file path to add to backup 110 | repository: String, 111 | 112 | /// SSH command to use when connecting to the repo 113 | #[arg(short, long)] 114 | rsh: Option, 115 | 116 | /// Repository kind ("borg" or "rustic"). 117 | #[arg(long, default_value = "borg")] 118 | kind: RepositoryKind, 119 | 120 | #[command(flatten)] 121 | passphrase_loc: PassphraseSource, 122 | }, 123 | /// Create a new profile with a given name 124 | AddProfile { name: String }, 125 | /// Mount a borg repo or archive as a FUSE filesystem. 126 | Mount { 127 | /// The directory or file path to add to backup 128 | repository_path: String, 129 | /// The mount point 130 | mountpoint: PathBuf, 131 | /// If set, don't open a GUI file manager to browse the mounted 132 | /// repository or archive. 133 | #[arg(short, long)] 134 | do_not_open_in_gui_file_manager: bool, 135 | }, 136 | /// Unmount a mounted Borg repo or archive 137 | Umount { 138 | /// The mount point 139 | mountpoint: PathBuf, 140 | }, 141 | /// List the archives in a directory. 142 | /// 143 | /// By default it'll print the three most recent archives. 144 | List { 145 | /// If specified, only list archives from this repository. 146 | repository: Option, 147 | /// If set, show all archives from all repositories. 148 | #[arg(short, long)] 149 | all: bool, 150 | /// How many archives per repository to list 151 | #[arg(short, long, default_value = "3")] 152 | count: usize, 153 | }, 154 | /// List the repositories associated with the profile. 155 | ListRepos, 156 | /// Update the configuration file format. No-op if you're on the latest config version. 157 | UpdateConfig, 158 | /// Set the password for a repository. By default it will read 159 | /// BORG_PASSPHRASE from the environment unless --keyfile is specified. 
160 | SetPassword { 161 | /// Name of the repository (use `borgtui list-repos` to list) 162 | repo: String, 163 | 164 | #[command(flatten)] 165 | passphrase_loc: PassphraseSource, 166 | }, 167 | /// Compact a borg repo 168 | Compact, 169 | /// Prune a borg repo 170 | Prune, 171 | /// Verify the integrity of all active repositories. This usually takes a long time. 172 | /// 173 | /// It will display a notification and log on failure. 174 | Check { 175 | /// If specified, only check these repositories. Consider using `list-repos` to get repo urls. 176 | only_these_repos: Option>, 177 | }, 178 | /// Repair backups. This is potentially dangerous - use with caution! 179 | Repair { 180 | /// If specified, only repair these repositories. Consider using `list-repos` to get repo urls. 181 | only_these_repos: Option>, 182 | }, 183 | /// Create a systemd unit to create a backup 184 | /// 185 | /// If `--install` is specified it will install the unit as a user unit under 186 | /// ~/.config/systemd/user. To use this unit you will need to reload user 187 | /// units and start the unit. 188 | /// 189 | /// $ systemctl --user daemon-reload 190 | /// 191 | /// $ systemctl --user start borgtui-create-{profile_name}.service 192 | /// 193 | /// The default profile is aptly named "default" so the command used is: 194 | /// 195 | /// $ systemctl --user start borgtui-create-default.service 196 | SystemdCreateUnit { 197 | /// If true, save the unit under ~/.config/systemd/user/ 198 | #[arg(long)] 199 | install: bool, 200 | /// If set, make a check unit instead of a create unit. 201 | #[arg(long)] 202 | check_unit: bool, 203 | /// If set, save the save the unit to the path specified. This option implies 204 | /// --install 205 | #[arg(long)] 206 | install_path: Option, 207 | /// If set, create a timer unit instead of a service unit. 208 | #[arg(long)] 209 | timer: bool, 210 | }, 211 | /// Print the configuration file path for the given profile (defaults to "default") 212 | /// 213 | /// Note that this command will _open_ the profile to see if it exists. 214 | ConfigPath, 215 | /// Generate shell completion scripts (printed to stdout) 216 | ShellCompletion { 217 | /// Type of shell to print completions for the specified shell. Defaults to zsh. 218 | /// 219 | /// Allowed options: "zsh", "bash", "fish", "elvish", "powershell" 220 | #[arg(long, default_value = "zsh")] 221 | shell: String, 222 | }, 223 | /// Install man pages in a target directory. 224 | InstallManPages { 225 | /// Path where man pages will be written. Several files will be written 226 | /// as borgtui uses subcommands. 227 | man_root: PathBuf, 228 | }, 229 | } 230 | 231 | pub(crate) async fn print_manpage(man_root: PathBuf) -> BorgResult<()> { 232 | // Adapted from https://github.com/clap-rs/clap/discussions/3603#discussioncomment-3641542 233 | #[async_recursion] 234 | async fn write_man_page(dir: &Path, app: &clap::Command) -> BorgResult<()> { 235 | // `get_display_name()` is `Some` for all instances, except the root. 
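// For the root command it returns `None`, so fall back to `get_name()`.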
236 | let name = app.get_display_name().unwrap_or_else(|| app.get_name()); 237 | let mut out = tokio::fs::File::create(dir.join(format!("{name}.1"))).await?; 238 | 239 | let mut buf = Vec::new(); 240 | clap_mangen::Man::new(app.clone()) 241 | .title(name) 242 | .render(&mut buf)?; 243 | out.write_all(buf.as_slice()).await?; 244 | out.flush().await?; 245 | 246 | for sub in app.get_subcommands() { 247 | write_man_page(dir, sub).await?; 248 | } 249 | Ok(()) 250 | } 251 | let mut command = Args::command(); 252 | command.build(); 253 | write_man_page(man_root.as_path(), &command).await?; 254 | Ok(()) 255 | } 256 | 257 | pub(crate) fn print_shell_completion(shell_kind: &str) -> BorgResult<()> { 258 | let shell = match shell_kind { 259 | "zsh" => shells::Shell::Zsh, 260 | "bash" => shells::Shell::Bash, 261 | "fish" => shells::Shell::Fish, 262 | "elvish" => shells::Shell::Elvish, 263 | "powershell" => shells::Shell::PowerShell, 264 | _ => { 265 | anyhow::bail!("Unknown shell kind {}, assuming zsh", shell_kind); 266 | } 267 | }; 268 | generate( 269 | shell, 270 | &mut Args::command(), 271 | "borgtui", 272 | &mut std::io::stdout(), 273 | ); 274 | Ok(()) 275 | } 276 | 277 | pub(crate) fn get_args() -> Args { 278 | Args::parse() 279 | } 280 | -------------------------------------------------------------------------------- /src/gui/gui.rs: -------------------------------------------------------------------------------- 1 | use iced::{Element, Sandbox, Settings}; 2 | 3 | pub fn iced_main() -> iced::Result { 4 | Hello::run(Settings::default()) 5 | } 6 | 7 | struct Hello; 8 | 9 | impl Sandbox for Hello { 10 | type Message = (); 11 | 12 | fn new() -> Hello { 13 | Hello 14 | } 15 | 16 | fn title(&self) -> String { 17 | String::from("A cool application") 18 | } 19 | 20 | fn update(&mut self, _message: Self::Message) { 21 | // This application has no interactions 22 | } 23 | 24 | fn view(&self) -> Element { 25 | "Hello, world!".into() 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/gui/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod gui; 2 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; 3 | use std::sync::{Arc, Mutex}; 4 | use std::thread::JoinHandle; 5 | 6 | use anyhow::{anyhow, bail, Context}; 7 | use backends::borg_provider::hack_unmount; 8 | use chrono::Duration; 9 | use notify::Watcher; 10 | use profiles::{BorgV1OptionsBuilder, RepositoryKind, RepositoryOptions}; 11 | use tokio::io::AsyncWriteExt; 12 | use tokio::sync::{mpsc, Semaphore}; 13 | use tracing::{error, info, warn}; 14 | use tracing_subscriber::FmtSubscriber; 15 | use types::{log_on_error, show_notification, DirectoryFinder, EXTENDED_NOTIFICATION_DURATION}; 16 | use types::{BackupCreationProgress, CommandResponseSender}; 17 | use walkdir::WalkDir; 18 | 19 | use crate::borgtui::{BorgTui, Command, CommandResponse}; 20 | use crate::cli::Action; 21 | use crate::profiles::{Encryption, Profile, Repository}; 22 | use crate::types::{send_error, send_info, BorgResult, PrettyBytes}; 23 | 24 | mod backends; 25 | mod borgtui; 26 | mod cli; 27 | mod profiles; 28 | mod types; 29 | 30 | const QUEUE_SIZE: usize = 1000; 31 | 32 | /// Open a file path in a detached GUI file manager. 
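/// Uses `open::that_detached`, so BorgTUI does not block waiting on the spawned file manager.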
33 | fn open_path_in_gui_file_manager<P: AsRef<Path>>(path: P) -> BorgResult<()> { 34 | open::that_detached(path.as_ref())?; 35 | Ok(()) 36 | } 37 | 38 | fn determine_directory_size( 39 | path: PathBuf, 40 | byte_count: Arc<AtomicU64>, 41 | exclude_patterns: Vec<String>, 42 | ) { 43 | let patterns = exclude_patterns 44 | .iter() 45 | .map(|s| glob::Pattern::new(s.as_str())) 46 | .collect::<Result<Vec<_>, _>>(); 47 | let patterns = match patterns { 48 | Ok(pat) => pat, 49 | Err(e) => { 50 | error!( 51 | "Failed to create glob patterns from exclude_patterns: {}", 52 | e 53 | ); 54 | vec![] 55 | } 56 | }; 57 | let all_files = WalkDir::new(path).into_iter().filter_entry(|entry| { 58 | !patterns 59 | .iter() 60 | .any(|pattern| pattern.matches_path(entry.path())) 61 | }); 62 | for entry in all_files { 63 | let entry = match entry { 64 | Ok(entry) => entry, 65 | Err(e) => { 66 | error!("Failed to read entry: {}", e); 67 | continue; 68 | } 69 | }; 70 | match entry.metadata() { 71 | Ok(metadata) => { 72 | byte_count.fetch_add(metadata.len(), Ordering::SeqCst); 73 | } 74 | Err(e) => { 75 | error!("Failed to obtain metadata for entry {:?}: {}", entry, e); 76 | } 77 | } 78 | } 79 | } 80 | 81 | /// Returns Ok(true) to exit the program. 82 | async fn handle_tui_command( 83 | command: Command, 84 | command_response_send: CommandResponseSender, 85 | directory_finder: Arc<Mutex<DirectoryFinder>>, 86 | ) -> BorgResult<bool> { 87 | match command { 88 | Command::CreateBackup(profile) => { 89 | send_info!( 90 | command_response_send, 91 | format!("Starting backup of profile {}", &profile), 92 | "Failed to send backup start signal: {}" 93 | ); 94 | profile.create_backup(command_response_send).await?; 95 | Ok(false) 96 | } 97 | Command::UpdateProfileAndSave(mut profile, op, signal_success) => { 98 | profile.apply_operation(op).await?; 99 | profile.save_profile().await?; 100 | send_info!( 101 | command_response_send, 102 | format!("Saved profile '{}'", profile.name()), 103 | "Failed to send 'Saved profile' message: {}" 104 | ); 105 | command_response_send 106 | .send(CommandResponse::ProfileUpdated(profile)) 107 | .await?; 108 | signal_success.store(true, Ordering::SeqCst); 109 | Ok(false) 110 | } 111 | Command::SaveProfile(profile) => { 112 | send_info!( 113 | command_response_send, 114 | format!("Saved profile '{}'", profile.name()), 115 | "Failed to save profile: {}" 116 | ); 117 | if let Err(e) = profile.save_profile().await { 118 | send_error!( 119 | command_response_send, 120 | format!("Failed to save profile: {}", e) 121 | ); 122 | }; 123 | Ok(false) 124 | } 125 | Command::DetermineDirectorySize(path, byte_count_atomic, exclude_patterns) => { 126 | tokio::task::spawn_blocking(|| { 127 | determine_directory_size(path, byte_count_atomic, exclude_patterns) 128 | }); 129 | Ok(false) 130 | } 131 | Command::ListArchives(repo) => { 132 | tokio::spawn(async move { 133 | match repo.list_archives().await { 134 | Ok(res) => { 135 | if let Err(e) = command_response_send 136 | .send(CommandResponse::ListArchiveResult(res)) 137 | .await 138 | { 139 | error!("Failed to send ListArchiveResult for {}: {}", repo, e); 140 | } 141 | } 142 | Err(e) => { 143 | error!("Failed to list archives for {}: {}", repo, e); 144 | } 145 | } 146 | }); 147 | Ok(false) 148 | } 149 | Command::CheckRepository(repo) => { 150 | tokio::spawn(async move { 151 | send_info!( 152 | command_response_send, 153 | format!("Checking {}", repo), 154 | "Failed to send start checking info: {}" 155 | ); 156 | log_on_error!( 157 | repo.check(command_response_send).await, 158 | "Failed to check: {}" 159 | ); 160 | }); 161 | Ok(false)
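// Editorial note (not in the original source): the arms above share one pattern: anything that
// can take a while (listing archives, checking a repo, walking a directory tree) is moved onto a
// background task via tokio::spawn or spawn_blocking, and its results and errors travel back to
// the TUI over `command_response_send`. Returning Ok(false) keeps this command loop running;
// only Command::Quit (further below) returns Ok(true) to exit.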
162 | } 163 | Command::Compact(repo) => { 164 | tokio::spawn(async move { 165 | send_info!( 166 | command_response_send, 167 | format!("Compacting {}", repo), 168 | "Failed to send start compacting info: {}" 169 | ); 170 | if let Err(e) = repo.compact(command_response_send.clone()).await { 171 | send_error!(command_response_send, format!("Failed to compact: {}", e)); 172 | } else { 173 | send_info!(command_response_send, format!("Compacted {}", repo)); 174 | } 175 | }); 176 | Ok(false) 177 | } 178 | Command::Prune(repo, prune_options) => { 179 | tokio::spawn(async move { 180 | send_info!( 181 | command_response_send, 182 | format!("Pruning {}", repo), 183 | "Failed to send start prune info: {}" 184 | ); 185 | 186 | if let Err(e) = repo 187 | .prune(prune_options, command_response_send.clone()) 188 | .await 189 | { 190 | send_error!(command_response_send, format!("Failed to prune: {}", e)) 191 | } else { 192 | send_info!(command_response_send, format!("Pruned {}", repo)); 193 | } 194 | }); 195 | Ok(false) 196 | } 197 | Command::GetDirectorySuggestionsFor(directory) => { 198 | // TODO: This blocks command handling, right? 199 | tokio::task::spawn_blocking(move || { 200 | let mut dir_finder = 201 | log_on_error!(directory_finder.lock(), "failed to lock dir_finder: {}"); 202 | log_on_error!( 203 | dir_finder.update_guess(&directory), 204 | "failed to update guess: {}" 205 | ); 206 | let suggestions = log_on_error!( 207 | dir_finder.suggestions(&directory, 30), 208 | "failed to obtain suggestions: {}" 209 | ); 210 | log_on_error!( 211 | command_response_send 212 | .blocking_send(CommandResponse::SuggestionResults(suggestions)), 213 | "Failed to send suggestion results: {}" 214 | ); 215 | }); 216 | Ok(false) 217 | } 218 | Command::Mount(repo, repo_or_archive, mountpoint) => { 219 | let mountpoint_p = PathBuf::from(mountpoint.clone()); 220 | tokio::spawn(async move { 221 | if let Err(e) = repo 222 | .mount(repo_or_archive.clone(), mountpoint_p.clone()) 223 | .await 224 | { 225 | send_error!(command_response_send, format!("Failed to mount: {}", e)) 226 | } else { 227 | send_info!( 228 | command_response_send, 229 | format!("Successfully mounted {}", repo_or_archive) 230 | ); 231 | log_on_error!( 232 | command_response_send 233 | .send(CommandResponse::MountResult(repo_or_archive, mountpoint)) 234 | .await, 235 | "Failed to send suggestion results: {}" 236 | ); 237 | if let Err(e) = open_path_in_gui_file_manager(mountpoint_p) { 238 | send_error!( 239 | command_response_send, 240 | format!("Failed to open file manager: {}", e.to_string()) 241 | ); 242 | } 243 | } 244 | }); 245 | Ok(false) 246 | } 247 | Command::Unmount(mountpoint) => { 248 | // TODO: Properly join all of this. 
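// Editorial sketch for the TODO above (hypothetical, not part of the original source): one way to
// make this joinable would be to keep the JoinHandle instead of detaching the task, e.g.
//     let unmount_task: tokio::task::JoinHandle<()> = tokio::spawn(async move { /* ... */ });
//     background_tasks.push(unmount_task); // `background_tasks` would be a Vec<JoinHandle<()>>
// and then await every collected handle during shutdown so unmounts finish before the process exits.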
249 | tokio::spawn(async move { 250 | match hack_unmount(PathBuf::from(mountpoint.clone())).await { 251 | Ok(_) => { 252 | send_info!( 253 | command_response_send, 254 | format!("Successfully unmounted {}", mountpoint) 255 | ) 256 | } 257 | Err(e) => { 258 | send_error!( 259 | command_response_send, 260 | format!("Failed to unmount: {}", e.to_string()) 261 | ); 262 | } 263 | } 264 | }); 265 | Ok(false) 266 | } 267 | Command::Quit => Ok(true), 268 | } 269 | } 270 | 271 | fn watch_profile_for_changes( 272 | profile_path: PathBuf, 273 | response_send: CommandResponseSender, 274 | ) -> BorgResult<()> { 275 | let profile_path_clone = profile_path.clone(); 276 | let mut watcher = 277 | notify::recommended_watcher(move |res: Result| match res { 278 | Ok(event) => { 279 | let is_modify = event.kind.is_modify(); 280 | tracing::debug!("is_modify: {}", is_modify); 281 | if is_modify { 282 | match Profile::blocking_open_path(profile_path_clone.clone()) { 283 | Ok(profile) => { 284 | if let Err(e) = response_send 285 | .blocking_send(CommandResponse::ProfileUpdated(profile)) 286 | { 287 | error!("Failed to send update profile message: {}", e) 288 | } 289 | } 290 | Err(e) => error!("Failed to read profile after modify update: {}", e), 291 | } 292 | } 293 | } 294 | Err(e) => error!( 295 | "Error while watching path <{}>: {}", 296 | profile_path_clone.to_string_lossy(), 297 | e 298 | ), 299 | })?; 300 | 301 | std::thread::spawn(move || loop { 302 | if let Err(e) = watcher.watch(&profile_path, notify::RecursiveMode::NonRecursive) { 303 | error!("Failed to watcher.watch: {}", e) 304 | } 305 | }); 306 | 307 | Ok(()) 308 | } 309 | 310 | async fn setup_tui(profile: Option, watch_profile: bool) -> BorgResult> { 311 | let profile = Profile::open_or_create(&profile).await?; 312 | let (command_send, mut command_recv) = mpsc::channel::(QUEUE_SIZE); 313 | let (response_send, response_recv) = mpsc::channel::(QUEUE_SIZE); 314 | 315 | // Profile watcher (sends updates when the config file is manually edited) 316 | if watch_profile { 317 | watch_profile_for_changes(profile.profile_path()?, response_send.clone())?; 318 | } 319 | 320 | // Directory Finder 321 | let mut dir_finder = DirectoryFinder::new(); 322 | if let Err(e) = dir_finder.seed_exclude_patterns(profile.exclude_patterns()) { 323 | error!("Failed to add exclude patterns: {}", e); 324 | } 325 | let dir_finder = Arc::new(Mutex::new(dir_finder)); 326 | let res = std::thread::spawn(move || { 327 | let mut tui = BorgTui::new(profile, command_send, response_recv); 328 | if let Err(e) = tui.run() { 329 | error!("Failed to run tui: {}", e); 330 | } 331 | }); 332 | while let Some(command) = command_recv.recv().await { 333 | match handle_tui_command(command, response_send.clone(), dir_finder.clone()).await { 334 | Ok(true) => return Ok(res), 335 | Err(e) => { 336 | error!("Failed to handle tui command: {}", e); 337 | send_error!(response_send, format!("{}", e)); 338 | } 339 | _ => {} 340 | } 341 | } 342 | Ok(res) 343 | } 344 | 345 | async fn handle_command_response(command_response_recv: mpsc::Receiver) { 346 | let mut command_response_recv = command_response_recv; 347 | while let Some(message) = command_response_recv.recv().await { 348 | match message { 349 | CommandResponse::CreateProgress(msg) => match msg.create_progress { 350 | BackupCreationProgress::InProgress { 351 | original_size, 352 | compressed_size, 353 | deduplicated_size, 354 | num_files, 355 | current_path, 356 | } => info!( 357 | "[{}] {}: {} -> {} -> {} ({} files)", 358 | msg.repository, 359 | 
current_path, 360 | PrettyBytes(original_size), 361 | PrettyBytes(compressed_size), 362 | PrettyBytes(deduplicated_size), 363 | num_files 364 | ), 365 | BackupCreationProgress::Finished => { 366 | info!("Finished backup for {}", msg.repository) 367 | } 368 | }, 369 | CommandResponse::CheckProgress(check_progress) => { 370 | info!("[{}] {}", check_progress.repo_loc, check_progress.message); 371 | } 372 | CommandResponse::CheckComplete(check_complete) => { 373 | match check_complete.error { 374 | Some(error) => error!( 375 | "[{}] Check failed with message: {}", 376 | check_complete.repo_loc, error 377 | ), 378 | None => info!( 379 | "[{}] Check successfully completed!", 380 | check_complete.repo_loc 381 | ), 382 | }; 383 | } 384 | CommandResponse::Info(info_log) => info!("{}", info_log), 385 | CommandResponse::ListArchiveResult(list_archive_result) => { 386 | // TODO: Print this out in a more informative way 387 | info!("{:?}", list_archive_result) 388 | } 389 | CommandResponse::SuggestionResults(_) => { 390 | error!("Received SuggestionResults in non-interactive!") 391 | } 392 | CommandResponse::MountResult(_, _) => { 393 | error!("Received MountResult in non-interactive!") 394 | } 395 | CommandResponse::Error(error_message) => error!(error_message), 396 | CommandResponse::ProfileUpdated(_profile) => info!("Profile updated."), 397 | } 398 | } 399 | } 400 | 401 | fn generate_system_unit(profile_name: &str, timer: bool, action: &str, calendar: &str) -> String { 402 | if timer { 403 | format!( 404 | "[Unit] 405 | Description=Run BorgTUI {action} on a schedule (\"{calendar}\") 406 | 407 | [Timer] 408 | OnCalendar={calendar} 409 | Persistent=true 410 | 411 | [Install] 412 | WantedBy=timers.target" 413 | ) 414 | } else { 415 | format!( 416 | "[Unit] 417 | Description=BorgTUI {action} for profile `{profile_name}` 418 | 419 | [Service] 420 | Type=simple 421 | ExecStart=borgtui -p {profile_name} {action} 422 | 423 | [Install] 424 | WantedBy=default.target 425 | " 426 | ) 427 | } 428 | } 429 | 430 | async fn handle_action( 431 | action: Action, 432 | profile_name: Option, 433 | command_response_send: CommandResponseSender, 434 | ) -> BorgResult<()> { 435 | match action { 436 | Action::Init { 437 | passphrase_loc, 438 | location, 439 | kind, 440 | rsh, 441 | } => { 442 | let mut profile = Profile::open_or_create(&profile_name).await?; 443 | // TODO: Refactor this logic to be cleaner, don't set the keyfile 444 | // when calling Encryption::from_passphrase_loc 445 | let encryption = Encryption::from_passphrase_loc(passphrase_loc.clone())?; 446 | if let Encryption::None = encryption { 447 | warn!("Initializing Repository without encryption."); 448 | } 449 | let passphrase = passphrase_loc.get_passphrase()?; 450 | let config = match kind { 451 | RepositoryKind::Borg => { 452 | RepositoryOptions::BorgV1(BorgV1OptionsBuilder::new().rsh(rsh).build()) 453 | } 454 | RepositoryKind::Rustic => RepositoryOptions::Rustic(Default::default()), 455 | }; 456 | 457 | let mut new_repo = Repository::new(location.clone(), encryption.clone(), config); 458 | new_repo.set_passphrase(encryption, passphrase)?; 459 | new_repo.init().await?; 460 | profile.add_repository(new_repo); 461 | profile.save_profile().await?; 462 | info!("Initialized Repository '{}' in {}", location, profile); 463 | Ok(()) 464 | } 465 | Action::Create => { 466 | let profile = Profile::open_or_create(&profile_name).await?; 467 | info!("Creating backup for profile {}", profile); 468 | let handle = profile 469 | 
.create_backup_with_notification(command_response_send) 470 | .await?; 471 | handle.await?; 472 | Ok(()) 473 | } 474 | Action::Add { directory } => { 475 | let mut profile = Profile::open_or_create(&profile_name).await?; 476 | profile.add_backup_path(directory.clone()).await?; 477 | profile.save_profile().await?; 478 | info!("Added {} to profile {}", directory.display(), profile); 479 | Ok(()) 480 | } 481 | Action::Remove { directory } => { 482 | let mut profile = Profile::open_or_create(&profile_name).await?; 483 | profile.remove_backup_path(&directory); 484 | profile.save_profile().await?; 485 | info!("Removed {} from profile {}", directory.display(), profile); 486 | Ok(()) 487 | } 488 | Action::AddRepo { 489 | repository, 490 | passphrase_loc, 491 | kind, 492 | rsh, 493 | } => { 494 | // TODO: Check if repo is valid (maybe once "borg info" or something works) 495 | let mut profile = Profile::open_or_create(&profile_name).await?; 496 | if profile.has_repository(&repository) { 497 | bail!( 498 | "Repository {} already exists in profile {}", 499 | repository, 500 | profile 501 | ); 502 | } 503 | let config = match kind { 504 | RepositoryKind::Borg => { 505 | RepositoryOptions::BorgV1(BorgV1OptionsBuilder::new().rsh(rsh).build()) 506 | } 507 | RepositoryKind::Rustic => RepositoryOptions::Rustic(Default::default()), 508 | }; 509 | let repo = Repository::new( 510 | repository.clone(), 511 | Encryption::from_passphrase_loc(passphrase_loc)?, 512 | config, 513 | ); 514 | profile.add_repository(repo); 515 | profile.save_profile().await?; 516 | info!("Added repository {} to profile {}", repository, profile); 517 | Ok(()) 518 | } 519 | Action::List { 520 | repository, 521 | all, 522 | count, 523 | } => { 524 | let profile = Profile::open_or_create(&profile_name).await?; 525 | let timeout_duration_secs = profile.action_timeout_seconds() as i64; 526 | for repo in profile.active_repositories().filter(|repo| { 527 | repository 528 | .as_ref() 529 | .map(|rr| rr.as_str() == repo.path.as_str()) 530 | .unwrap_or(true) 531 | }) { 532 | let list_archives_per_repo = match tokio::time::timeout( 533 | Duration::seconds(timeout_duration_secs).to_std().unwrap(), 534 | repo.list_archives(), 535 | ) 536 | .await 537 | { 538 | Ok(list_archive_result) => list_archive_result?, 539 | Err(_timeout_error) => { 540 | error!( 541 | "Timeout ({}s) while attempting to list repo {}", 542 | timeout_duration_secs, repo 543 | ); 544 | continue; 545 | } 546 | }; 547 | let mut to_skip = 0; 548 | if !all { 549 | to_skip = list_archives_per_repo.archives.len().saturating_sub(count); 550 | } 551 | for archives in list_archives_per_repo.archives.iter().skip(to_skip) { 552 | info!("{}::{}", repo.path(), archives.name); 553 | } 554 | } 555 | Ok(()) 556 | } 557 | Action::ListRepos => { 558 | let profile = Profile::open_or_create(&profile_name).await?; 559 | for repo in profile.repositories() { 560 | let mut extra_info = ""; 561 | if repo.disabled() { 562 | extra_info = " (DISABLED)"; 563 | } 564 | println!("{}{}", &repo.path, extra_info); 565 | } 566 | Ok(()) 567 | } 568 | Action::UpdateConfig => { 569 | let profile = Profile::open_or_create(&profile_name).await?; 570 | profile.save_profile().await?; 571 | info!("Updated config for {}", profile); 572 | Ok(()) 573 | } 574 | Action::SetPassword { 575 | repo, 576 | passphrase_loc, 577 | } => { 578 | let mut profile = Profile::open_or_create(&profile_name).await?; 579 | let borg_passphrase = passphrase_loc.get_passphrase()?; 580 | let encryption = 
Encryption::from_passphrase_loc(passphrase_loc)?; 581 | profile.update_repository_password(&repo, encryption.clone(), borg_passphrase)?; 582 | profile.save_profile().await?; 583 | info!("Updated password for {} (method: {:?})", repo, encryption); 584 | Ok(()) 585 | } 586 | Action::Compact => { 587 | let profile = Profile::open_or_create(&profile_name).await?; 588 | for repo in profile.active_repositories() { 589 | repo.compact(command_response_send.clone()).await?; 590 | info!("Finished compacting {}", repo); 591 | } 592 | Ok(()) 593 | } 594 | Action::Prune => { 595 | let profile = Profile::open_or_create(&profile_name).await?; 596 | for repo in profile.active_repositories() { 597 | repo.prune(profile.prune_options(), command_response_send.clone()) 598 | .await?; 599 | info!("Finished pruning {}", repo); 600 | } 601 | Ok(()) 602 | } 603 | Action::Check { only_these_repos } => { 604 | let profile = Profile::open_or_create(&profile_name).await?; 605 | let check_semaphore = Arc::new(Semaphore::new(0)); 606 | let successful = Arc::new(AtomicBool::new(true)); 607 | for repo in profile.active_repositories() { 608 | let should_check = only_these_repos 609 | .as_ref() 610 | .map(|repos_to_check| repos_to_check.contains(&repo.path())) 611 | .unwrap_or(true); 612 | if !should_check { 613 | tracing::info!("Skipping verification of {}", repo.path()); 614 | check_semaphore.add_permits(1); 615 | continue; 616 | } 617 | tracing::info!("Starting verification of {}", repo.path()); 618 | let successful_clone = successful.clone(); 619 | let check_semaphore_clone = check_semaphore.clone(); 620 | let repo_clone = repo.clone(); 621 | let progress_channel = command_response_send.clone(); 622 | // TODO: do this spawn inside of the provider 623 | tokio::spawn(async move { 624 | let res = match repo_clone.check(progress_channel).await { 625 | Ok(res) => res, 626 | Err(e) => { 627 | error!("Verification failed: {e}"); 628 | false 629 | } 630 | }; 631 | if !res { 632 | tracing::error!("Verification failed for repository: {}", repo_clone); 633 | log_on_error!( 634 | show_notification( 635 | &format!("Verification Failed for {}!", repo_clone), 636 | "Please check BorgTUI's logs for more information.", 637 | EXTENDED_NOTIFICATION_DURATION, 638 | ) 639 | .await, 640 | "Failed to show notification popup: {}" 641 | ); 642 | } 643 | 644 | successful_clone.fetch_and(res, Ordering::SeqCst); 645 | check_semaphore_clone.add_permits(1); 646 | }); 647 | } 648 | let _ = check_semaphore 649 | .acquire_many(profile.num_active_repositories() as u32) 650 | .await?; 651 | let title = if successful.load(Ordering::SeqCst) { 652 | "Backup Verification Successful!" 653 | } else { 654 | "Backup Verification FAILED!" 
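// Editorial note (not in the original source): the Semaphore created at the top of this arm acts
// as a completion latch: every per-repository task adds one permit when it finishes (skipped
// repositories add theirs immediately), and acquire_many(num_active_repositories) above blocks
// until all checks are done, while `successful` is AND-ed together via fetch_and so a single
// failure flips the final notification to the FAILED variant.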
655 | }; 656 | let message = format!("Profile: {}", profile.name()); 657 | info!("{}", message); 658 | show_notification(title, &message, EXTENDED_NOTIFICATION_DURATION).await?; 659 | Ok(()) 660 | } 661 | Action::Repair { only_these_repos } => { 662 | let profile = Profile::open_or_create(&profile_name).await?; 663 | let repair_semaphore = Arc::new(Semaphore::new(0)); 664 | let successful = Arc::new(AtomicBool::new(true)); 665 | for repo in profile.active_repositories() { 666 | let should_check = only_these_repos 667 | .as_ref() 668 | .map(|repos_to_repair| repos_to_repair.contains(&repo.path())) 669 | .unwrap_or(true); 670 | if !should_check { 671 | tracing::info!("Skipping repair of {}", repo.path()); 672 | repair_semaphore.add_permits(1); 673 | continue; 674 | } 675 | tracing::info!("Starting repair of {}", repo.path()); 676 | let successful_clone = successful.clone(); 677 | let repair_semaphore_clone = repair_semaphore.clone(); 678 | let repo_clone = repo.clone(); 679 | let progress_channel = command_response_send.clone(); 680 | // TODO: do this spawn inside of the provider 681 | tokio::spawn(async move { 682 | let res = match repo_clone.repair(progress_channel).await { 683 | Ok(res) => res, 684 | Err(e) => { 685 | error!("Repair failed: {e}"); 686 | false 687 | } 688 | }; 689 | if !res { 690 | tracing::error!("Repair failed for repository: {}", repo_clone); 691 | log_on_error!( 692 | show_notification( 693 | &format!("Repair failed for {}!", repo_clone), 694 | "Please check BorgTUI's logs for more information.", 695 | EXTENDED_NOTIFICATION_DURATION, 696 | ) 697 | .await, 698 | "Failed to show notification popup: {}" 699 | ); 700 | } 701 | 702 | successful_clone.fetch_and(res, Ordering::SeqCst); 703 | repair_semaphore_clone.add_permits(1); 704 | }); 705 | } 706 | let _ = repair_semaphore 707 | .acquire_many(profile.num_active_repositories() as u32) 708 | .await?; 709 | let title = if successful.load(Ordering::SeqCst) { 710 | "Backup Repair Successful!" 711 | } else { 712 | "Backup Repair FAILED!" 
713 | }; 714 | let message = format!("Profile: {}", profile.name()); 715 | info!("{}", message); 716 | show_notification(title, &message, EXTENDED_NOTIFICATION_DURATION).await?; 717 | Ok(()) 718 | } 719 | Action::AddProfile { name } => { 720 | let profile = match Profile::open_profile(&name).await { 721 | Ok(Some(profile)) => bail!("Error: {} already exists", profile), 722 | Ok(None) => Profile::create_profile(&name).await?, 723 | Err(e) => bail!( 724 | "[{}] exists but encountered an error while reading: {:#}", 725 | Profile::profile_path_for_name(&name)?.to_string_lossy(), 726 | e 727 | ), 728 | }; 729 | info!( 730 | "Created {} ({})", 731 | profile, 732 | profile 733 | .profile_path() 734 | .unwrap_or("unknown_path".into()) 735 | .to_string_lossy() 736 | ); 737 | Ok(()) 738 | } 739 | Action::Mount { 740 | repository_path, 741 | mountpoint, 742 | do_not_open_in_gui_file_manager, 743 | } => { 744 | let profile = Profile::open_or_create(&profile_name).await?; 745 | let repo = profile.find_repo_from_mount_src(&repository_path)?; 746 | repo.mount(repository_path, mountpoint.clone()).await?; 747 | if !do_not_open_in_gui_file_manager { 748 | if let Err(e) = open_path_in_gui_file_manager(mountpoint) { 749 | error!("Failed to open GUI file manager: {}", e); 750 | } 751 | } 752 | Ok(()) 753 | } 754 | Action::Umount { mountpoint } => { 755 | hack_unmount(mountpoint.clone()).await?; 756 | info!("Successfully unmounted {}", mountpoint.to_string_lossy()); 757 | Ok(()) 758 | } 759 | Action::SystemdCreateUnit { 760 | install, 761 | install_path, 762 | timer, 763 | check_unit, 764 | } => { 765 | let profile = Profile::open_or_create(&profile_name).await?; 766 | let (action, calendar) = if check_unit { 767 | ("check", "monthly") 768 | } else { 769 | ("create", "*-*-* 21:00:00") 770 | }; 771 | let systemd_unit_contents = 772 | generate_system_unit(profile.name(), timer, action, calendar); 773 | let extension = if timer { "timer" } else { "service" }; 774 | if install || install_path.is_some() { 775 | let home_dir = dirs::home_dir() 776 | .ok_or_else(|| anyhow!("Couldn't find a home directory. Is $HOME set?"))?; 777 | let install_path = install_path.unwrap_or_else(|| { 778 | home_dir.join(format!( 779 | ".config/systemd/user/borgtui-{}-{}.{}", 780 | action, 781 | profile.name(), 782 | extension 783 | )) 784 | }); 785 | info!("{:?}", install_path); 786 | if let Some(parent_path) = install_path.parent() { 787 | tokio::fs::create_dir_all(parent_path).await?; 788 | } 789 | tokio::fs::File::create(&install_path) 790 | .await? 
791 | .write_all(systemd_unit_contents.as_bytes()) 792 | .await?; 793 | let unit_type = if timer { "timer unit" } else { "create unit" }; 794 | info!( 795 | "Installed systemd {} for {} at {}", 796 | unit_type, 797 | profile, 798 | install_path.to_string_lossy() 799 | ); 800 | } else { 801 | println!("{}", systemd_unit_contents) 802 | } 803 | Ok(()) 804 | } 805 | 806 | Action::ConfigPath => { 807 | let profile = Profile::open_or_create(&profile_name).await?; 808 | println!( 809 | "{}", 810 | Profile::profile_path_for_name(profile.name())?.to_string_lossy() 811 | ); 812 | Ok(()) 813 | } 814 | Action::ShellCompletion { shell } => { 815 | cli::print_shell_completion(&shell)?; 816 | Ok(()) 817 | } 818 | Action::InstallManPages { man_root } => cli::print_manpage(man_root).await, 819 | } 820 | } 821 | 822 | fn main() -> BorgResult<()> { 823 | let args = cli::get_args(); 824 | let is_noninteractive = args.action.is_some(); 825 | let file_appender = tracing_appender::rolling::hourly("/tmp", "borgtui.log"); 826 | let (non_blocking, _guard) = tracing_appender::non_blocking(file_appender); 827 | let subscriber = FmtSubscriber::builder().with_max_level(tracing::Level::DEBUG); 828 | if is_noninteractive { 829 | tracing::subscriber::set_global_default(subscriber.finish()) 830 | .with_context(|| "setting default subscriber failed")?; 831 | } else { 832 | tracing::subscriber::set_global_default(subscriber.with_writer(non_blocking).finish()) 833 | .with_context(|| "setting default subscriber failed")?; 834 | } 835 | 836 | let mut tui_join_handle = None; 837 | tokio::runtime::Builder::new_multi_thread() 838 | .enable_all() 839 | .build()? 840 | .block_on(async { 841 | let res = match args.action { 842 | Some(action) => { 843 | let (send, recv) = mpsc::channel::(QUEUE_SIZE); 844 | let handle = tokio::spawn(async move { handle_command_response(recv).await }); 845 | if let Err(e) = handle_action(action, args.borgtui_profile, send).await { 846 | error!("Error handling CLI action: {}", e) 847 | }; 848 | handle.await 849 | } 850 | None => { 851 | match setup_tui(args.borgtui_profile, args.watch_profile).await { 852 | Ok(join_handle) => tui_join_handle = Some(join_handle), 853 | Err(e) => error!("Failed to setup tui: {}", e), 854 | } 855 | Ok(()) 856 | } 857 | }; 858 | 859 | if let Err(e) = res { 860 | error!("Error: {}", e); 861 | std::process::exit(1); 862 | } 863 | }); 864 | if let Some(join_handle) = tui_join_handle { 865 | join_handle.join().unwrap(); 866 | } 867 | Ok(()) 868 | } 869 | -------------------------------------------------------------------------------- /src/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cli; 2 | -------------------------------------------------------------------------------- /src/profiles.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::Write, 3 | num::NonZeroU16, 4 | os::unix::prelude::PermissionsExt, 5 | path::{Path, PathBuf}, 6 | str::FromStr, 7 | sync::Arc, 8 | time::Instant, 9 | }; 10 | 11 | use crate::{ 12 | backends::{ 13 | backup_provider::BackupProvider, borg_provider::BorgProvider, 14 | rustic_provider::RusticProvider, 15 | }, 16 | cli::PassphraseSource, 17 | types::{ 18 | log_on_error, show_notification, BorgResult, CommandResponseSender, RepositoryArchives, 19 | SHORT_NOTIFICATION_DURATION, 20 | }, 21 | }; 22 | use anyhow::anyhow; 23 | use anyhow::{bail, Context}; 24 | use keyring::Entry; 25 | use std::fs; 26 | use tracing::info; 27 | 28 | use 
serde::{Deserialize, Serialize}; 29 | use tokio::sync::{Mutex, Semaphore}; 30 | 31 | #[derive(Serialize, Deserialize, Clone)] 32 | pub(crate) struct Passphrase(String); 33 | 34 | impl Passphrase { 35 | pub(crate) fn inner(&self) -> String { 36 | self.0.clone() 37 | } 38 | 39 | pub(crate) fn inner_ref(&self) -> &str { 40 | &self.0 41 | } 42 | } 43 | 44 | impl std::fmt::Debug for Passphrase { 45 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 46 | write!(f, "Passphrase") 47 | } 48 | } 49 | 50 | impl AsRef<str> for Passphrase { 51 | fn as_ref(&self) -> &str { 52 | self.0.as_str() 53 | } 54 | } 55 | 56 | impl From<String> for Passphrase { 57 | fn from(value: String) -> Self { 58 | Self(value) 59 | } 60 | } 61 | 62 | impl From<&str> for Passphrase { 63 | fn from(value: &str) -> Self { 64 | Self(value.to_string()) 65 | } 66 | } 67 | 68 | #[derive(Serialize, Deserialize, Debug, Clone)] 69 | pub(crate) enum Encryption { 70 | None, 71 | Raw(Passphrase), 72 | Keyring, 73 | Keyfile(PathBuf), 74 | } 75 | 76 | impl Encryption { 77 | pub(crate) fn from_passphrase_loc(passphrase_loc: PassphraseSource) -> BorgResult<Self> { 78 | if passphrase_loc.raw { 79 | if let Some(borg_passphrase) = passphrase_loc.borg_passphrase.clone() { 80 | return Ok(Encryption::Raw(borg_passphrase)); 81 | } 82 | } 83 | match (passphrase_loc.keyfile, passphrase_loc.borg_passphrase) { 84 | (Some(keyfile), None) => Ok(Encryption::Keyfile(keyfile)), 85 | (Some(keyfile), Some(borg_passphrase)) => { 86 | let keyfile_path = Path::new(&keyfile); 87 | if !keyfile_path.exists() { 88 | let mut file = std::fs::File::create(keyfile_path)?; 89 | let mut permissions = file.metadata()?.permissions(); 90 | permissions.set_mode(0o600); 91 | file.set_permissions(permissions)?; 92 | file.write_all(borg_passphrase.inner_ref().as_bytes())?; 93 | } else { 94 | tracing::warn!("Keyfile exists and BORG_PASSPHRASE set in the environment.
Ignoring BORG_PASSPHRASE and not updating the keyfile!"); 95 | } 96 | Ok(Encryption::Keyfile(keyfile)) 97 | } 98 | (None, Some(_borg_passphrase)) => Ok(Encryption::Keyring), 99 | (None, None) => Ok(Encryption::None), 100 | } 101 | } 102 | } 103 | 104 | #[derive(Debug, Serialize, Deserialize, Clone, Copy)] 105 | pub(crate) enum RepositoryKind { 106 | Borg, 107 | Rustic, 108 | } 109 | 110 | impl std::fmt::Display for RepositoryKind { 111 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 112 | let sort = match self { 113 | RepositoryKind::Borg => "borg".to_string(), 114 | RepositoryKind::Rustic => "rustic".to_string(), 115 | }; 116 | write!(f, "{sort}") 117 | } 118 | } 119 | 120 | impl FromStr for RepositoryKind { 121 | type Err = anyhow::Error; 122 | 123 | fn from_str(s: &str) -> Result { 124 | match s { 125 | "borg" => Ok(RepositoryKind::Borg), 126 | "Borg" => Ok(RepositoryKind::Borg), 127 | "rustic" => Ok(RepositoryKind::Rustic), 128 | "Rustic" => Ok(RepositoryKind::Rustic), 129 | otherwise => Err(anyhow::anyhow!("Unknown repository kind: {otherwise}")), 130 | } 131 | } 132 | } 133 | 134 | const fn default_repository_kind() -> RepositoryKind { 135 | RepositoryKind::Borg 136 | } 137 | 138 | #[derive(Serialize, Deserialize, Debug, Clone)] 139 | pub(crate) struct RepositoryV1 { 140 | pub(crate) path: String, 141 | /// SSH command to use when connecting 142 | #[serde(default)] 143 | pub(crate) rsh: Option, 144 | encryption: Encryption, 145 | #[serde(default)] 146 | disabled: bool, 147 | #[serde(default = "default_repository_kind")] 148 | kind: RepositoryKind, 149 | } 150 | 151 | trait ToLatestRepository { 152 | fn to_latest(&self) -> Repository; 153 | } 154 | 155 | impl ToLatestRepository for RepositoryV1 { 156 | fn to_latest(&self) -> Repository { 157 | let config = match self.kind { 158 | RepositoryKind::Borg => { 159 | RepositoryOptions::BorgV1(BorgV1OptionsBuilder::new().rsh(self.rsh.clone()).build()) 160 | } 161 | RepositoryKind::Rustic => RepositoryOptions::Rustic(Default::default()), 162 | }; 163 | Repository { 164 | path: self.path.clone(), 165 | encryption: self.encryption.clone(), 166 | disabled: self.disabled, 167 | config, 168 | lock: Default::default(), 169 | } 170 | } 171 | } 172 | 173 | impl ToLatestRepository for Repository { 174 | fn to_latest(&self) -> Repository { 175 | self.clone() 176 | } 177 | } 178 | 179 | #[derive(Serialize, Deserialize, Debug, Clone)] 180 | pub(crate) struct Repository { 181 | pub(crate) path: String, 182 | /// SSH command to use when connecting 183 | encryption: Encryption, 184 | #[serde(default)] 185 | disabled: bool, 186 | config: RepositoryOptions, 187 | #[serde(skip)] 188 | pub(crate) lock: Arc>, 189 | } 190 | 191 | impl std::fmt::Display for Repository { 192 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 193 | write!(f, "Repository<{}>", self.path) 194 | } 195 | } 196 | 197 | fn get_keyring_entry(repo_path: &str) -> BorgResult { 198 | Entry::new("borgtui", repo_path).with_context(|| { 199 | format!( 200 | "Failed to create keyring entry for repository {}", 201 | repo_path 202 | ) 203 | }) 204 | } 205 | 206 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] 207 | pub(crate) struct BorgV1Options { 208 | pub(crate) rsh: Option, 209 | pub(crate) remote_path: Option, 210 | } 211 | 212 | pub(crate) struct BorgV1OptionsBuilder { 213 | rsh: Option, 214 | remote_path: Option, 215 | } 216 | 217 | impl BorgV1OptionsBuilder { 218 | pub(crate) fn new() -> Self { 219 | Self { 220 | rsh: None, 221 | 
remote_path: None, 222 | } 223 | } 224 | 225 | pub(crate) fn rsh(mut self, rsh: Option) -> Self { 226 | self.rsh = rsh; 227 | self 228 | } 229 | 230 | #[allow(unused)] 231 | pub(crate) fn remote_path(mut self, remote_path: Option) -> Self { 232 | self.remote_path = remote_path; 233 | self 234 | } 235 | 236 | pub(crate) fn build(self) -> BorgV1Options { 237 | BorgV1Options { 238 | rsh: self.rsh, 239 | remote_path: self.remote_path, 240 | } 241 | } 242 | } 243 | 244 | #[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] 245 | pub(crate) struct RusticOptions {} 246 | 247 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] 248 | pub(crate) enum RepositoryOptions { 249 | BorgV1(BorgV1Options), 250 | Rustic(RusticOptions), 251 | } 252 | 253 | impl RepositoryOptions { 254 | // TODO: Does this API make sense? 255 | pub(crate) fn borg_options(&self) -> BorgResult { 256 | match self { 257 | RepositoryOptions::BorgV1(options) => Ok(options.clone()), 258 | _ => Err(anyhow::anyhow!( 259 | "borg_options called on non-borg repository" 260 | )), 261 | } 262 | } 263 | 264 | pub(crate) fn rustic_options(&self) -> BorgResult { 265 | match self { 266 | RepositoryOptions::Rustic(options) => Ok(options.clone()), 267 | _ => Err(anyhow::anyhow!( 268 | "rustic_options called on non-rustic repository" 269 | )), 270 | } 271 | } 272 | } 273 | 274 | impl Repository { 275 | pub(crate) fn new(path: String, encryption: Encryption, config: RepositoryOptions) -> Self { 276 | Self { 277 | path, 278 | encryption, 279 | config, 280 | disabled: false, 281 | lock: Default::default(), 282 | } 283 | } 284 | 285 | pub(crate) fn get_passphrase(&self) -> BorgResult> { 286 | match &self.encryption { 287 | Encryption::None => Ok(None), 288 | Encryption::Raw(passphrase) => Ok(Some(passphrase.clone())), 289 | Encryption::Keyring => get_keyring_entry(&self.path)? 290 | .get_password() 291 | .map_err(|e| anyhow::anyhow!("Failed to get passphrase from keyring: {}", e)) 292 | .map(|v| Some(Passphrase(v))), 293 | Encryption::Keyfile(filepath) => { 294 | let passphrase = fs::read_to_string(filepath) 295 | .with_context(|| format!("Failed to read {filepath:?}. Does the file exist?"))? 
296 | .trim() 297 | .to_string(); 298 | Ok(Some(Passphrase(passphrase))) 299 | } 300 | } 301 | } 302 | 303 | pub(crate) fn set_passphrase( 304 | &mut self, 305 | encryption: Encryption, 306 | borg_passphrase: Option<Passphrase>, 307 | ) -> BorgResult<()> { 308 | self.encryption = encryption; 309 | if matches!(self.encryption, Encryption::Keyring) { 310 | let entry = get_keyring_entry(&self.path)?; 311 | let borg_passphrase = borg_passphrase.ok_or_else(|| { 312 | anyhow!( 313 | "Keyring encryption is being used in {} but BORG_PASSPHRASE is unset!", 314 | self 315 | ) 316 | })?; 317 | entry 318 | .set_password(borg_passphrase.inner_ref()) 319 | .with_context(|| { 320 | format!( 321 | "Failed to set password in keyring for repository {} in profile {}", 322 | &self.path, &self 323 | ) 324 | })?; 325 | assert!(entry.get_password().is_ok()); 326 | } 327 | Ok(()) 328 | } 329 | 330 | /// If true, the repo has been disabled and actions will 331 | /// not be performed on it 332 | pub(crate) fn disabled(&self) -> bool { 333 | self.disabled 334 | } 335 | 336 | pub(crate) fn path(&self) -> String { 337 | self.path.clone() 338 | } 339 | 340 | pub(crate) fn path_ref(&self) -> &str { 341 | &self.path 342 | } 343 | 344 | pub(crate) fn borg_options(&self) -> BorgResult<BorgV1Options> { 345 | self.config.borg_options() 346 | } 347 | 348 | #[allow(unused)] 349 | pub(crate) fn rustic_options(&self) -> BorgResult<RusticOptions> { 350 | self.config.rustic_options() 351 | } 352 | 353 | pub(crate) async fn list_archives(&self) -> BorgResult<RepositoryArchives> { 354 | self.backup_provider().list_archives(self).await 355 | } 356 | 357 | pub(crate) async fn init(&self) -> BorgResult<()> { 358 | self.backup_provider() 359 | .init_repo(self.path(), self.get_passphrase()?, self.config.clone()) 360 | .await?; 361 | Ok(()) 362 | } 363 | 364 | pub(crate) async fn mount( 365 | &self, 366 | given_repository_path: String, 367 | mountpoint: PathBuf, 368 | ) -> BorgResult<()> { 369 | self.backup_provider() 370 | .mount(self, given_repository_path, mountpoint) 371 | .await?; 372 | Ok(()) 373 | } 374 | 375 | pub(crate) async fn prune( 376 | &self, 377 | prune_options: PruneOptions, 378 | progress_channel: CommandResponseSender, 379 | ) -> BorgResult<()> { 380 | info!("Starting to prune {}", self); 381 | self.backup_provider() 382 | .prune(self, prune_options, progress_channel) 383 | .await 384 | } 385 | 386 | pub(crate) async fn compact(&self, progress_channel: CommandResponseSender) -> BorgResult<()> { 387 | self.backup_provider().compact(self, progress_channel).await 388 | } 389 | 390 | pub(crate) async fn check(&self, progress_channel: CommandResponseSender) -> BorgResult<bool> { 391 | self.backup_provider().check(self, progress_channel).await 392 | } 393 | 394 | pub(crate) async fn repair(&self, progress_channel: CommandResponseSender) -> BorgResult<bool> { 395 | self.backup_provider().repair(self, progress_channel).await 396 | } 397 | 398 | pub(crate) fn backup_provider(&self) -> Box<dyn BackupProvider> { 399 | match self.config { 400 | RepositoryOptions::BorgV1(_) => Box::new(BorgProvider {}), 401 | RepositoryOptions::Rustic(_) => Box::new(RusticProvider {}), 402 | } 403 | } 404 | 405 | pub(crate) fn repo_kind_name(&self) -> &'static str { 406 | match self.config { 407 | RepositoryOptions::BorgV1(_) => "Borg", 408 | RepositoryOptions::Rustic(_) => "Rustic", 409 | } 410 | } 411 | } 412 | 413 | #[derive(Serialize, Deserialize, Debug, Clone, Copy)] 414 | pub(crate) struct PruneOptions { 415 | pub(crate) keep_daily: NonZeroU16, 416 | pub(crate) keep_weekly: NonZeroU16, 417 | pub(crate) keep_monthly: NonZeroU16, 418 |
pub(crate) keep_yearly: NonZeroU16, 419 | } 420 | 421 | impl Default for PruneOptions { 422 | fn default() -> Self { 423 | Self { 424 | keep_daily: NonZeroU16::new(64).unwrap(), 425 | keep_weekly: NonZeroU16::new(128).unwrap(), 426 | keep_monthly: NonZeroU16::new(64).unwrap(), 427 | keep_yearly: NonZeroU16::new(32).unwrap(), 428 | } 429 | } 430 | } 431 | 432 | #[derive(Debug, Clone)] 433 | pub(crate) enum ProfileOperation { 434 | AddBackupPath(PathBuf), 435 | } 436 | 437 | // Necessary for serde(default) 438 | const fn default_action_timeout_seconds() -> u64 { 439 | 30 440 | } 441 | 442 | // Necessary for serde(default) 443 | const fn default_exclude_caches() -> bool { 444 | true 445 | } 446 | 447 | #[derive(Serialize, Deserialize, Debug, Clone)] 448 | #[serde(untagged)] 449 | enum RepositoryVersion { 450 | // Order matters! 451 | V2(Repository), 452 | V1(RepositoryV1), 453 | } 454 | 455 | impl RepositoryVersion { 456 | fn deserialize<'de, D>(deserializer: D) -> Result<Vec<Repository>, D::Error> 457 | where 458 | D: serde::Deserializer<'de>, 459 | { 460 | let repos = Vec::<RepositoryVersion>::deserialize(deserializer)?; 461 | let mut result = Vec::new(); 462 | for repo in repos { 463 | match repo { 464 | RepositoryVersion::V1(repo) => result.push(repo.to_latest()), 465 | RepositoryVersion::V2(repo) => result.push(repo.to_latest()), 466 | } 467 | } 468 | Ok(result) 469 | } 470 | } 471 | 472 | #[derive(Serialize, Deserialize, Debug, Clone)] 473 | pub(crate) struct Profile { 474 | name: String, 475 | backup_paths: Vec<PathBuf>, 476 | #[serde(default)] 477 | exclude_patterns: Vec<String>, 478 | #[serde(default = "default_exclude_caches")] 479 | exclude_caches: bool, 480 | #[serde(default)] 481 | prune_options: PruneOptions, 482 | #[serde(default = "default_action_timeout_seconds")] 483 | action_timeout_seconds: u64, 484 | #[serde(deserialize_with = "RepositoryVersion::deserialize")] 485 | repos: Vec<Repository>, 486 | } 487 | 488 | impl std::fmt::Display for Profile { 489 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 490 | write!(f, "Profile<{}>", self.name) 491 | } 492 | } 493 | 494 | impl Profile { 495 | pub(crate) const DEFAULT_PROFILE_NAME: &'static str = "default"; 496 | pub(crate) async fn open_or_create(profile: &Option<String>) -> BorgResult<Self> { 497 | let profile_name = profile 498 | .as_ref() 499 | .map(|s| s.as_str()) 500 | .unwrap_or(Self::DEFAULT_PROFILE_NAME); 501 | if let Some(profile) = Self::open_profile(profile_name).await?
{ 502 | Ok(profile) 503 | } else { 504 | Self::create_profile(profile_name).await 505 | } 506 | } 507 | 508 | fn blank(name: &str) -> Self { 509 | Self { 510 | name: name.to_string(), 511 | exclude_patterns: vec![], 512 | exclude_caches: true, 513 | backup_paths: vec![], 514 | prune_options: Default::default(), 515 | repos: vec![], 516 | action_timeout_seconds: default_action_timeout_seconds(), 517 | } 518 | } 519 | 520 | pub(crate) async fn create_profile(name: &str) -> BorgResult { 521 | if let Ok(Some(_)) = Self::open_profile(name).await { 522 | anyhow::bail!("Cannot create profile '{}' as it already exists!", name); 523 | } 524 | let profile = Self::blank(name); 525 | profile.save_profile().await?; 526 | info!( 527 | "Created {} ({})", 528 | profile, 529 | profile 530 | .profile_path() 531 | .unwrap_or("unknown_path".into()) 532 | .to_string_lossy() 533 | ); 534 | Ok(profile) 535 | } 536 | 537 | pub(crate) async fn open_profile(name: &str) -> BorgResult> { 538 | let profile_path = Profile::profile_path_for_name(name)?; 539 | if !profile_path.exists() { 540 | return Ok(None); 541 | } 542 | let profile = tokio::fs::read_to_string(profile_path) 543 | .await 544 | .with_context(|| format!("Failed to read profile {}", name))?; 545 | serde_json::from_str(&profile) 546 | .with_context(|| format!("Failed to deserialize profile {}", name)) 547 | .map(Some) 548 | } 549 | 550 | pub(crate) fn blocking_open_path>(path: P) -> BorgResult { 551 | let profile = std::fs::read_to_string(path.as_ref()).with_context(|| { 552 | format!("Failed to read profile {}", path.as_ref().to_string_lossy()) 553 | })?; 554 | serde_json::from_str(&profile).with_context(|| { 555 | format!( 556 | "Failed to deserialize profile {}", 557 | path.as_ref().to_string_lossy() 558 | ) 559 | }) 560 | } 561 | 562 | pub(crate) fn find_repo_from_mount_src(&self, repo_or_archive: &str) -> BorgResult { 563 | let repo_name = match repo_or_archive.find("::") { 564 | Some(loc) => repo_or_archive[..loc].to_string(), 565 | None => repo_or_archive.to_string(), 566 | }; 567 | tracing::debug!("Figured repo name is: {}", repo_name); 568 | self.active_repositories() 569 | .find(|repo| repo.path == repo_name) 570 | .ok_or_else(|| anyhow::anyhow!("Could not find repo: {}", repo_or_archive)) 571 | .cloned() 572 | } 573 | 574 | pub(crate) async fn create_backup_with_notification( 575 | &self, 576 | progress_channel: CommandResponseSender, 577 | ) -> BorgResult> { 578 | let completion_semaphore = Arc::new(Semaphore::new(0)); 579 | let num_active_repos = self.num_active_repos(); 580 | let self_name = format!("{}", self); 581 | let completion_semaphore_clone = completion_semaphore.clone(); 582 | let join_handle = tokio::spawn(async move { 583 | let start_time = Instant::now(); 584 | if let Err(e) = completion_semaphore_clone 585 | .acquire_many(num_active_repos as u32) 586 | .await 587 | { 588 | tracing::error!("Failed to wait on completion semaphore: {}", e); 589 | } else { 590 | let elapsed_duration = start_time.elapsed(); 591 | let nicely_formatted = format!( 592 | "{:0>2}:{:0>2}:{:0>2}", 593 | elapsed_duration.as_secs() / 60 / 60, 594 | elapsed_duration.as_secs() / 60 % 60, 595 | elapsed_duration.as_secs() % 60 596 | ); 597 | tracing::info!("Completed backup for {} in {}", self_name, nicely_formatted); 598 | log_on_error!( 599 | show_notification( 600 | &format!("Backup complete for {}", self_name), 601 | &format!("Completed in {}", nicely_formatted), 602 | SHORT_NOTIFICATION_DURATION 603 | ) 604 | .await, 605 | "Failed to show notification: {}" 
606 | ); 607 | } 608 | }); 609 | self.create_backup_internal(progress_channel, completion_semaphore) 610 | .await?; 611 | Ok(join_handle) 612 | } 613 | 614 | pub(crate) async fn create_backup( 615 | &self, 616 | progress_channel: CommandResponseSender, 617 | ) -> BorgResult<()> { 618 | self.create_backup_internal(progress_channel, Arc::new(Semaphore::new(0))) 619 | .await 620 | } 621 | 622 | async fn create_backup_internal( 623 | &self, 624 | progress_channel: CommandResponseSender, 625 | completion_semaphore: Arc, 626 | ) -> BorgResult<()> { 627 | let archive_name = format!( 628 | "{}-{}", 629 | self.name(), 630 | chrono::Local::now().format("%Y-%m-%d:%H:%M:%S") 631 | ); 632 | for repo in self.active_repositories() { 633 | let backup_provider = repo.backup_provider(); 634 | backup_provider 635 | .create_backup( 636 | archive_name.clone(), 637 | self.backup_paths(), 638 | self.exclude_patterns(), 639 | self.exclude_caches(), 640 | repo.clone(), 641 | progress_channel.clone(), 642 | completion_semaphore.clone(), 643 | ) 644 | .await?; 645 | } 646 | Ok(()) 647 | } 648 | 649 | pub(crate) fn active_repositories(&self) -> impl Iterator { 650 | self.repositories().iter().filter(|repo| !repo.disabled) 651 | } 652 | 653 | pub(crate) fn num_active_repositories(&self) -> usize { 654 | self.active_repositories().count() 655 | } 656 | 657 | pub(crate) fn name(&self) -> &str { 658 | &self.name 659 | } 660 | 661 | pub(crate) fn repositories(&self) -> &[Repository] { 662 | &self.repos 663 | } 664 | 665 | pub(crate) fn backup_paths(&self) -> &[PathBuf] { 666 | &self.backup_paths 667 | } 668 | 669 | pub(crate) fn num_repos(&self) -> usize { 670 | self.repos.len() 671 | } 672 | 673 | pub(crate) fn num_active_repos(&self) -> usize { 674 | self.active_repositories().count() 675 | } 676 | 677 | pub(crate) fn action_timeout_seconds(&self) -> u64 { 678 | self.action_timeout_seconds 679 | } 680 | 681 | pub(crate) fn prune_options(&self) -> PruneOptions { 682 | self.prune_options 683 | } 684 | 685 | pub(crate) fn exclude_patterns(&self) -> &[String] { 686 | &self.exclude_patterns 687 | } 688 | 689 | pub(crate) fn exclude_caches(&self) -> bool { 690 | self.exclude_caches 691 | } 692 | 693 | pub(crate) fn serialize(&self) -> BorgResult { 694 | serde_json::to_string_pretty(self) 695 | .with_context(|| format!("Failed to serialize profile {}", self.name())) 696 | } 697 | 698 | pub(crate) async fn apply_operation(&mut self, op: ProfileOperation) -> BorgResult<()> { 699 | // This looks silly but I was intending to add more profile operations in the future :^) 700 | match op { 701 | ProfileOperation::AddBackupPath(path) => self.add_backup_path(path).await, 702 | } 703 | } 704 | 705 | pub(crate) fn profile_path_for_name(name: &str) -> BorgResult { 706 | let mut path = dirs::config_dir() 707 | .ok_or_else(|| anyhow::anyhow!("Failed to get config directory. Is $HOME set?"))?; 708 | path.push("borgtui"); 709 | path.push("profiles"); 710 | path.push(name); 711 | path.set_extension("json"); 712 | Ok(path) 713 | } 714 | 715 | pub(crate) fn profile_path(&self) -> BorgResult { 716 | Self::profile_path_for_name(&self.name) 717 | } 718 | 719 | pub(crate) async fn save_profile(&self) -> BorgResult<()> { 720 | let profile_path = self.profile_path()?; 721 | if let Some(parent) = profile_path.parent() { 722 | tokio::fs::create_dir_all(parent).await.with_context(|| { 723 | format!( 724 | "Failed to create parent directory for profile {}", 725 | self.name 726 | ) 727 | })? 
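// Editorial note (not in the original source): profiles are persisted as pretty-printed JSON at
// the path built by profile_path_for_name above, which on Linux typically resolves to
// ~/.config/borgtui/profiles/<name>.json via dirs::config_dir(); save_profile creates the parent
// directory first so the very first save on a fresh machine succeeds.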
728 | } 729 | let profile = self.serialize()?; 730 | tokio::fs::write(profile_path, profile) 731 | .await 732 | .with_context(|| format!("Failed to write profile {}", self.name)) 733 | } 734 | 735 | pub(crate) fn has_repository(&self, path: &str) -> bool { 736 | self.repos.iter().any(|r| r.path == path) 737 | } 738 | 739 | pub(crate) fn add_repository(&mut self, repo: Repository) { 740 | self.repos.push(repo); 741 | } 742 | 743 | // TODO: Rewrite this in terms of PassphraseSource 744 | pub(crate) fn update_repository_password( 745 | &mut self, 746 | repo_path: &str, 747 | encryption: Encryption, 748 | borg_passphrase: Option, 749 | ) -> BorgResult<()> { 750 | let self_str = format!("{}", self); // used in error message below 751 | let repo = self 752 | .repos 753 | .iter_mut() 754 | .find(|repo| repo.path == repo_path) 755 | .ok_or_else(|| { 756 | anyhow::anyhow!("Couldn't find repository {} in {}", repo_path, self_str) 757 | })?; 758 | repo.set_passphrase(encryption, borg_passphrase)?; 759 | Ok(()) 760 | } 761 | 762 | pub(crate) async fn add_backup_path(&mut self, path: PathBuf) -> BorgResult<()> { 763 | if self.backup_paths.contains(&path) { 764 | return Err(anyhow::anyhow!( 765 | "Path {} already exists in profile {}", 766 | path.display(), 767 | self.name 768 | )); 769 | } 770 | tokio::fs::metadata(&path).await.with_context(|| { 771 | format!( 772 | "Failed to get metadata for path {} when adding to profile {}. Does the path exist?", 773 | path.display(), self.name 774 | ) 775 | })?; 776 | let canonical_path = tokio::fs::canonicalize(&path).await.with_context(|| { 777 | format!( 778 | "Failed to canonicalize path {} when adding to profile {}. Does the path exist?", 779 | path.display(), 780 | self.name 781 | ) 782 | })?; 783 | if canonical_path != path { 784 | bail!("Attempted to add relative path or path that contained symlinks. 
\nAttempted='{}',\nCanonical='{}'", path.to_string_lossy(), canonical_path.to_string_lossy()); 785 | } 786 | self.backup_paths.push(path); 787 | Ok(()) 788 | } 789 | 790 | pub(crate) fn remove_backup_path(&mut self, path: &Path) { 791 | self.backup_paths.retain(|p| p != path); 792 | } 793 | } 794 | 795 | #[cfg(test)] 796 | mod tests { 797 | use super::*; 798 | const GOLDEN_V1_CONFIG: &str = r#" 799 | { 800 | "name": "dev", 801 | "backup_paths": [ 802 | "/home/david/programming/collatz", 803 | "/home/david/programming/advent-of-code-2020", 804 | "/home/david/programming/borgtui", 805 | "/home/david/Pictures" 806 | ], 807 | "exclude_patterns": [ 808 | "**/tmp*" 809 | ], 810 | "exclude_caches": true, 811 | "prune_options": { 812 | "keep_daily": 2, 813 | "keep_weekly": 1, 814 | "keep_monthly": 1, 815 | "keep_yearly": 1 816 | }, 817 | "action_timeout_seconds": 30, 818 | "repos": [ 819 | { 820 | "path": "/home/david/borg-test-repo0", 821 | "rsh": "foobar", 822 | "encryption": "None", 823 | "disabled": false, 824 | "kind": "Borg" 825 | }, 826 | { 827 | "path": "/home/david/restic-test-repo", 828 | "rsh": null, 829 | "encryption": { 830 | "Keyfile": "/home/david/.borg-passphrase" 831 | }, 832 | "disabled": false, 833 | "kind": "Rustic" 834 | } 835 | ] 836 | } 837 | "#; 838 | 839 | const GOLDEN_V2_CONFIG: &str = r#" 840 | { 841 | "name": "dev", 842 | "backup_paths": [ 843 | "/home/david/programming/collatz", 844 | "/home/david/programming/advent-of-code-2020", 845 | "/home/david/programming/borgtui", 846 | "/home/david/Pictures" 847 | ], 848 | "exclude_patterns": [ 849 | "**/tmp*" 850 | ], 851 | "exclude_caches": true, 852 | "prune_options": { 853 | "keep_daily": 2, 854 | "keep_weekly": 1, 855 | "keep_monthly": 1, 856 | "keep_yearly": 1 857 | }, 858 | "action_timeout_seconds": 30, 859 | "repos": [ 860 | { 861 | "path": "/home/david/borg-test-repo0", 862 | "encryption": "None", 863 | "disabled": false, 864 | "config": { 865 | "BorgV1": { 866 | "rsh": "foobar" 867 | } 868 | } 869 | }, 870 | { 871 | "path": "/home/david/restic-test-repo", 872 | "encryption": { 873 | "Keyfile": "/home/david/.borg-passphrase" 874 | }, 875 | "disabled": false, 876 | "config": { 877 | "Rustic": {} 878 | } 879 | } 880 | ] 881 | } 882 | "#; 883 | 884 | #[test] 885 | fn can_load_old_config() { 886 | let profile: Profile = serde_json::from_str(GOLDEN_V1_CONFIG).unwrap(); 887 | assert_eq!( 888 | profile.repositories()[0] 889 | .borg_options() 890 | .expect("should have borg options") 891 | .rsh, 892 | Some("foobar".to_string()) 893 | ); 894 | assert!(profile.repos[1].borg_options().is_err()); 895 | assert!(profile.repos[1].rustic_options().is_ok()); 896 | } 897 | 898 | #[test] 899 | fn can_load_new_config() { 900 | let profile: Profile = serde_json::from_str(GOLDEN_V2_CONFIG).unwrap(); 901 | assert_eq!( 902 | profile.repositories()[0] 903 | .borg_options() 904 | .expect("should have borg options") 905 | .rsh, 906 | Some("foobar".to_string()) 907 | ); 908 | assert!(profile.repositories()[1].borg_options().is_err()); 909 | assert!(profile.repositories()[1].rustic_options().is_ok()); 910 | } 911 | 912 | #[test] 913 | fn v1_to_v2_config_yields_same_config() { 914 | let profile_v1: Profile = serde_json::from_str(GOLDEN_V1_CONFIG).unwrap(); 915 | let profile_v2: Profile = serde_json::from_str(GOLDEN_V2_CONFIG).unwrap(); 916 | profile_v1 917 | .repositories() 918 | .iter() 919 | .zip(profile_v2.repositories()) 920 | .for_each(|(v1, v2)| { 921 | assert_eq!(v1.path, v2.path); 922 | assert_eq!(v1.disabled, v2.disabled); 923 | 
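// Editorial note (not in the original source): the encryption field is not compared in this test
// because Encryption does not derive PartialEq; the assertions here cover path, disabled, and the
// migrated repository options, which is enough to show that the V1 and V2 golden configs describe
// the same repositories.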
assert_eq!(v1.config, v2.config); 924 | }); 925 | } 926 | } 927 | -------------------------------------------------------------------------------- /src/types.rs: -------------------------------------------------------------------------------- 1 | pub(crate) type BorgResult<T> = anyhow::Result<T>; 2 | 3 | use std::{ 4 | collections::{BTreeSet, VecDeque}, 5 | fmt::Display, 6 | path::PathBuf, 7 | }; 8 | 9 | pub(crate) const EXTENDED_NOTIFICATION_DURATION: std::time::Duration = 10 | std::time::Duration::from_secs(60); 11 | pub(crate) const SHORT_NOTIFICATION_DURATION: std::time::Duration = 12 | std::time::Duration::from_secs(15); 13 | 14 | /// Send a CommandResponse::CheckProgress in a channel. 15 | macro_rules! send_check_progress { 16 | ($channel:expr, $repo_loc:expr, $message:expr) => { 17 | if let Err(e) = $channel 18 | .send(crate::borgtui::CommandResponse::CheckProgress( 19 | crate::types::CheckProgress::new($repo_loc, $message), 20 | )) 21 | .await 22 | { 23 | tracing::error!("Error occurred while sending check progress message: {}", e); 24 | } 25 | }; 26 | } 27 | pub(crate) use send_check_progress; 28 | 29 | /// Send a CommandResponse::CheckComplete in a channel. 30 | macro_rules! send_check_complete { 31 | ($channel:expr, $repo_loc:expr, $error:expr) => { 32 | if let Err(e) = $channel 33 | .send(crate::borgtui::CommandResponse::CheckComplete( 34 | crate::types::CheckComplete::new($repo_loc, $error), 35 | )) 36 | .await 37 | { 38 | tracing::error!("Error occurred while sending check complete message: {}", e); 39 | } 40 | }; 41 | } 42 | pub(crate) use send_check_complete; 43 | 44 | /// Send a CommandResponse::Info in a channel. 45 | macro_rules! send_info { 46 | ($channel:expr, $info_message:expr) => { 47 | if let Err(e) = $channel.send(CommandResponse::Info($info_message)).await { 48 | tracing::error!( 49 | "Error occurred while sending info message \"{}\": {}", 50 | &$info_message, 51 | e 52 | ); 53 | } 54 | }; 55 | ($channel:expr, $info_message:expr, $error_message:expr) => { 56 | if let Err(e) = $channel.send(CommandResponse::Info($info_message)).await { 57 | tracing::error!($error_message, e); 58 | } 59 | }; 60 | } 61 | 62 | pub(crate) type CommandResponseSender = tokio::sync::mpsc::Sender<CommandResponse>; 63 | 64 | use glob::Pattern; 65 | use notify_rust::{Notification, Timeout}; 66 | pub(crate) use send_info; 67 | 68 | /// Send a CommandResponse::Error in a channel. 69 | macro_rules! send_error { 70 | ($channel:expr, $info_message:expr) => { 71 | if let Err(e) = $channel.send(CommandResponse::Error($info_message)).await { 72 | tracing::error!( 73 | "Error occurred while sending error message \"{}\": {}", 74 | $info_message, 75 | e 76 | ); 77 | } 78 | }; 79 | ($channel:expr, $info_message:expr, $error_message:expr) => { 80 | if let Err(e) = $channel.send(CommandResponse::Error($info_message)).await { 81 | tracing::error!($error_message, e); 82 | } 83 | }; 84 | } 85 | pub(crate) use send_error; 86 | 87 | macro_rules! log_on_error { 88 | ($result_expr:expr, $log_message:expr) => { 89 | match $result_expr { 90 | Ok(res) => res, 91 | Err(e) => { 92 | tracing::error!($log_message, e); 93 | return; 94 | } 95 | } 96 | }; 97 | } 98 | pub(crate) use log_on_error; 99 | 100 | /// Take the repository lock, sending an Info message if the lock is already held. 101 | macro_rules!
100 | /// Take the repository lock, sending an informational message if it is already held.
101 | macro_rules! take_repo_lock {
102 |     ($channel:expr, $repo:expr) => {
103 |         if $repo.lock.try_lock().is_err() {
104 |             crate::types::send_info!(
105 |                 $channel,
106 |                 format!("Repo lock {} is already held, waiting...", $repo)
107 |             );
108 |         }
109 |         let _backup_guard = $repo.lock.lock().await;
110 |     };
111 |     ($channel:expr, $repo:expr, $message:expr) => {
112 |         if $repo.lock.try_lock().is_err() {
113 |             send_info!($channel, format!($message, $repo));
114 |         }
115 |         let _backup_guard = $repo.lock.lock().await;
116 |     };
117 | }
118 | pub(crate) use take_repo_lock;
119 | 
120 | use crate::borgtui::CommandResponse;
121 | 
122 | #[derive(Debug, Default)]
123 | pub(crate) struct RingBuffer<T, const N: usize> {
124 |     deque: VecDeque<T>,
125 | }
126 | 
127 | impl<T, const N: usize> RingBuffer<T, N> {
128 |     pub(crate) fn new() -> Self {
129 |         Self {
130 |             deque: VecDeque::with_capacity(N),
131 |         }
132 |     }
133 | 
134 |     pub(crate) fn push_back(&mut self, item: T) {
135 |         self.deque.push_back(item);
136 |         if self.deque.len() > N {
137 |             self.deque.pop_front();
138 |         }
139 |     }
140 | 
141 |     pub(crate) fn back(&self) -> Option<&T> {
142 |         self.deque.back()
143 |     }
144 | 
145 |     pub(crate) fn is_empty(&self) -> bool {
146 |         self.deque.is_empty()
147 |     }
148 | 
149 |     pub(crate) fn iter(&self) -> impl Iterator<Item = &T> {
150 |         self.deque.iter()
151 |     }
152 | }
153 | 
154 | impl<T, const N: usize> FromIterator<T> for RingBuffer<T, N> {
155 |     fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
156 |         let mut r = RingBuffer::new();
157 |         for item in iter.into_iter() {
158 |             r.push_back(item)
159 |         }
160 |         r
161 |     }
162 | }
163 | 
164 | #[cfg(test)]
165 | mod tests {
166 |     use super::RingBuffer;
167 | 
168 |     #[test]
169 |     fn test_pushes() {
170 |         let mut r = RingBuffer::<char, 3>::new();
171 |         for c in 'A'..='C' {
172 |             r.push_back(c);
173 |         }
174 |         assert_eq!(r.iter().copied().collect::<Vec<_>>(), vec!['A', 'B', 'C']);
175 |         assert_eq!(r.back(), Some(&'C'));
176 |         r.push_back('D');
177 |         assert_eq!(r.back(), Some(&'D'));
178 |         r.push_back('E');
179 |         assert_eq!(r.back(), Some(&'E'));
180 |         assert_eq!(r.iter().copied().collect::<Vec<_>>(), vec!['C', 'D', 'E']);
181 |         r.push_back('F');
182 |         assert_eq!(r.iter().copied().collect::<Vec<_>>(), vec!['D', 'E', 'F']);
183 |         r.push_back('G');
184 |         assert_eq!(r.iter().copied().collect::<Vec<_>>(), vec!['E', 'F', 'G']);
185 |     }
186 | 
187 |     #[test]
188 |     fn test_empty_iter() {
189 |         let empty: RingBuffer<char, 3> = RingBuffer::new();
190 |         let test: Vec<char> = Vec::new();
191 |         assert_eq!(empty.iter().copied().collect::<Vec<_>>(), test);
192 |     }
193 | 
194 |     #[test]
195 |     fn test_larger() {
196 |         let big: RingBuffer<usize, 256> = (0..=1024).collect();
197 |         assert_eq!(
198 |             big.iter().copied().collect::<Vec<_>>(),
199 |             (769..=1024).collect::<Vec<_>>()
200 |         );
201 |     }
202 | }
203 | 
204 | #[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
205 | pub(crate) struct PrettyBytes(pub(crate) u64);
206 | 
207 | impl PrettyBytes {
208 |     const UNITS: [&'static str; 6] = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"];
209 | 
210 |     fn scaled_with_unit(&self) -> (f64, usize, &'static str) {
211 |         let index = ((self.0 as f64).ln() / 1024_f64.ln()).trunc() as usize;
212 |         match Self::UNITS.get(index) {
213 |             Some(unit) => {
214 |                 let precision = if index < 3 { 0 } else { 3 };
215 |                 (self.0 as f64 / 1024f64.powf(index as f64), precision, unit)
216 |             }
217 |             None => (self.0 as f64, 0, "B"),
218 |         }
219 |     }
220 | 
221 |     pub(crate) fn from_megabytes_f64(mb: f64) -> Self {
222 |         PrettyBytes((mb * 1024.0 * 1024.0).trunc() as u64)
223 |     }
224 | }
225 | 
226 | impl Display for PrettyBytes {
227 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
228 |         let (scaled, precision, unit) = self.scaled_with_unit();
229 |         write!(f, "{0:.1$}", scaled, precision)?;
230 |         write!(f, " {}", unit)
231 |     }
232 | }
233 | 
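Aside (illustrative only, not part of the repository): a standalone check of the unit-selection arithmetic used by scaled_with_unit above; the 3 GiB input is an arbitrary example value.

// Illustrative sketch only: mirrors the index/precision selection of PrettyBytes.
fn main() {
    let n: u64 = 3 * 1024 * 1024 * 1024; // 3 GiB in bytes
    let index = ((n as f64).ln() / 1024_f64.ln()).trunc() as usize; // -> 3
    let scaled = n as f64 / 1024_f64.powf(index as f64); // -> 3.0
    let precision: usize = if index < 3 { 0 } else { 3 }; // three decimals from GiB upward
    let unit = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"][index];
    println!("{0:.1$} {2}", scaled, precision, unit); // prints "3.000 GiB"
}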
234 | #[derive(Debug, Clone)]
235 | pub(crate) struct DirectoryFinder {
236 |     known_directories: BTreeSet<PathBuf>,
237 |     num_updates: usize,
238 |     exclude_patterns: Vec<Pattern>,
239 | }
240 | 
241 | impl DirectoryFinder {
242 |     const UPDATE_GUESS_MAX_DEPTH: usize = 2;
243 |     pub(crate) fn new() -> Self {
244 |         Self {
245 |             known_directories: BTreeSet::new(),
246 |             num_updates: 0,
247 |             exclude_patterns: vec![],
248 |         }
249 |     }
250 | 
251 |     pub(crate) fn seed_exclude_patterns(&mut self, exclude_patterns: &[String]) -> BorgResult<()> {
252 |         self.exclude_patterns = exclude_patterns
253 |             .iter()
254 |             .map(|s| Pattern::new(s.as_str()))
255 |             .collect::<Result<Vec<_>, _>>()?;
256 |         Ok(())
257 |     }
258 | 
259 |     pub(crate) fn seed_from_directory(&mut self, directory: PathBuf, max_depth: usize) {
260 |         let all_directories = walkdir::WalkDir::new(directory)
261 |             .max_depth(max_depth)
262 |             .follow_links(true)
263 |             .into_iter()
264 |             .filter_entry(|entry| {
265 |                 !self
266 |                     .exclude_patterns
267 |                     .iter()
268 |                     .any(|pattern| pattern.matches_path(entry.path()))
269 |             })
270 |             .filter_map(|e| e.ok())
271 |             .filter(|entry| entry.file_type().is_dir())
272 |             .map(|entry| entry.path().to_owned());
273 |         self.known_directories.extend(all_directories);
274 |         self.num_updates += 1;
275 |     }
276 | 
277 |     pub(crate) fn update_guess(&mut self, file_path_fragment: &str) -> BorgResult<()> {
278 |         let path = PathBuf::from(file_path_fragment);
279 |         self.seed_from_directory(path, Self::UPDATE_GUESS_MAX_DEPTH);
280 |         self.num_updates += 1;
281 |         Ok(())
282 |     }
283 | 
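Aside (illustrative only, not part of the repository): seed_from_directory above is a filtered walkdir traversal. The same pattern can be run on its own; the /tmp root and the "**/.git*" exclude pattern are arbitrary examples.

// Illustrative sketch only: the walkdir + glob filtering used by seed_from_directory.
use glob::Pattern;

fn main() {
    let exclude = Pattern::new("**/.git*").expect("valid glob pattern");
    let dirs: Vec<_> = walkdir::WalkDir::new("/tmp")
        .max_depth(2)
        .follow_links(true)
        .into_iter()
        // filter_entry prunes excluded subtrees entirely rather than testing every entry.
        .filter_entry(|entry| !exclude.matches_path(entry.path()))
        .filter_map(|entry| entry.ok())
        .filter(|entry| entry.file_type().is_dir())
        .map(|entry| entry.path().to_owned())
        .collect();
    println!("found {} directories under /tmp", dirs.len());
}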
284 |     pub(crate) fn suggestions(
285 |         &self,
286 |         starting_fragment: &str,
287 |         max_results: usize,
288 |     ) -> BorgResult<(Vec<PathBuf>, usize)> {
289 |         let exclude_dot_files = !starting_fragment.contains('.');
290 |         let path = PathBuf::from(starting_fragment);
291 |         Ok((
292 |             self.known_directories
293 |                 .range(path..)
294 |                 .filter(|res| !(res.to_string_lossy().contains('.') && exclude_dot_files))
295 |                 .take(max_results)
296 |                 .cloned()
297 |                 .collect(),
298 |             self.num_updates,
299 |         ))
300 |     }
301 | }
302 | 
303 | pub(crate) async fn show_notification<I: Into<Timeout>>(
304 |     summary: &str,
305 |     body: &str,
306 |     duration: I,
307 | ) -> BorgResult<()> {
308 |     Notification::new()
309 |         .summary(summary)
310 |         .subtitle("BorgTUI")
311 |         .body(body)
312 |         .timeout(duration)
313 |         .show_async()
314 |         .await?;
315 |     Ok(())
316 | }
317 | 
318 | #[derive(Debug, Clone)]
319 | pub(crate) struct CheckComplete {
320 |     pub(crate) repo_loc: String,
321 |     pub(crate) error: Option<String>,
322 | }
323 | 
324 | impl CheckComplete {
325 |     pub(crate) fn new(repo_loc: String, error: Option<String>) -> Self {
326 |         Self { repo_loc, error }
327 |     }
328 | }
329 | 
330 | #[derive(Debug, Clone)]
331 | pub(crate) struct CheckProgress {
332 |     pub(crate) repo_loc: String,
333 |     pub(crate) message: String,
334 | }
335 | 
336 | impl CheckProgress {
337 |     pub(crate) fn new(repo_loc: String, message: String) -> Self {
338 |         Self { repo_loc, message }
339 |     }
340 | }
341 | 
342 | #[derive(Debug, Clone)]
343 | pub(crate) enum BackupCreationProgress {
344 |     InProgress {
345 |         original_size: u64,
346 |         compressed_size: u64,
347 |         deduplicated_size: u64,
348 |         num_files: u64,
349 |         current_path: String,
350 |     },
351 |     Finished,
352 | }
353 | 
354 | #[derive(Debug)]
355 | pub(crate) struct BackupCreateProgress {
356 |     pub(crate) repository: String,
357 |     pub(crate) create_progress: BackupCreationProgress,
358 | }
359 | 
360 | impl BackupCreateProgress {
361 |     pub(crate) fn new(repository: String, create_progress: BackupCreationProgress) -> Self {
362 |         Self {
363 |             repository,
364 |             create_progress,
365 |         }
366 |     }
367 | 
368 |     pub(crate) fn finished(repository: String) -> Self {
369 |         Self {
370 |             repository,
371 |             create_progress: BackupCreationProgress::Finished,
372 |         }
373 |     }
374 | }
375 | 
376 | #[derive(Debug, Clone)]
377 | pub(crate) struct Archive {
378 |     pub(crate) name: String,
379 |     pub(crate) creation_date: chrono::NaiveDateTime,
380 | }
381 | 
382 | #[derive(Debug, Clone)]
383 | pub(crate) struct RepositoryArchives {
384 |     pub(crate) path: String,
385 |     pub(crate) archives: Vec<Archive>,
386 | }
387 | 
388 | impl RepositoryArchives {
389 |     pub(crate) fn new(path: String, archives: Vec<Archive>) -> Self {
390 |         Self { path, archives }
391 |     }
392 | }
393 | 
--------------------------------------------------------------------------------
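Aside (illustrative only, not part of the repository): show_notification above reduces to the following notify-rust call. This standalone sketch hard-codes a 15-second timeout comparable to SHORT_NOTIFICATION_DURATION; the summary and body strings are arbitrary examples.

// Illustrative sketch only: the notify-rust pattern wrapped by show_notification.
use notify_rust::{Notification, Timeout};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    Notification::new()
        .summary("Backup complete")
        .body("All repositories finished successfully.")
        .timeout(Timeout::Milliseconds(15_000)) // roughly SHORT_NOTIFICATION_DURATION
        .show_async()
        .await?;
    Ok(())
}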