├── .gitignore ├── .travis.yml ├── COPYRIGHT ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── NOTES.md ├── README.md ├── TODO.md ├── crypto-spec.md ├── src │ ├── archive.rs │ ├── backend │ │ ├── file.rs │ │ └── mod.rs │ ├── cmds │ │ ├── create │ │ │ └── mod.rs │ │ ├── diff.rs │ │ ├── keygen │ │ │ └── mod.rs │ │ ├── list │ │ │ └── mod.rs │ │ ├── mod.rs │ │ ├── restore │ │ │ └── mod.rs │ │ └── verify │ │ │ └── mod.rs │ ├── error.rs │ ├── keystore.rs │ ├── logger.rs │ ├── main.rs │ └── newtype_macros.rs └── tests ├── docker-test │ ├── .gitignore │ ├── README.md │ ├── src │ │ ├── Dockerfile │ │ ├── create-backup.sh │ │ └── restore-backup.sh │ └── test.py └── test.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /keyfile 3 | /acd.authorization.json 4 | /acd.cache.sqlite 5 | /acd.endpoint.json 6 | /acd.security_profile.json 7 | /cache.sqlite 8 | /.config 9 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | sudo: false 3 | rust: 4 | - stable 5 | - beta 6 | addons: 7 | apt: 8 | packages: 9 | - liblzma-dev 10 | script: 11 | - cargo build --verbose 12 | - cargo test --verbose 13 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | This project is copyright 2019, fpgaminer@bitcoin-mining.com. 2 | 3 | Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or 4 | http://www.apache.org/licenses/LICENSE-2.0> or the MIT license 5 | <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. All files in the project 6 | unless otherwise noted may not be copied, modified, or distributed except 7 | according to those terms. 8 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing.
3 | [[package]] 4 | name = "ansi_term" 5 | version = "0.11.0" 6 | source = "registry+https://github.com/rust-lang/crates.io-index" 7 | dependencies = [ 8 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 9 | ] 10 | 11 | [[package]] 12 | name = "atty" 13 | version = "0.2.13" 14 | source = "registry+https://github.com/rust-lang/crates.io-index" 15 | dependencies = [ 16 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 17 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 18 | ] 19 | 20 | [[package]] 21 | name = "bitflags" 22 | version = "1.1.0" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | 25 | [[package]] 26 | name = "c2-chacha" 27 | version = "0.2.2" 28 | source = "registry+https://github.com/rust-lang/crates.io-index" 29 | dependencies = [ 30 | "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", 31 | "ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", 32 | ] 33 | 34 | [[package]] 35 | name = "cfg-if" 36 | version = "0.1.9" 37 | source = "registry+https://github.com/rust-lang/crates.io-index" 38 | 39 | [[package]] 40 | name = "clap" 41 | version = "2.33.0" 42 | source = "registry+https://github.com/rust-lang/crates.io-index" 43 | dependencies = [ 44 | "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", 45 | "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", 46 | "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 47 | "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", 48 | "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", 49 | "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", 50 | "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", 51 | ] 52 | 53 | [[package]] 54 | name = "data-encoding" 55 | version = "2.1.2" 56 | source = "registry+https://github.com/rust-lang/crates.io-index" 57 | 58 | [[package]] 59 | name = "fallible-iterator" 60 | version = "0.2.0" 61 | source = "registry+https://github.com/rust-lang/crates.io-index" 62 | 63 | [[package]] 64 | name = "fallible-streaming-iterator" 65 | version = "0.1.9" 66 | source = "registry+https://github.com/rust-lang/crates.io-index" 67 | 68 | [[package]] 69 | name = "fuchsia-cprng" 70 | version = "0.1.1" 71 | source = "registry+https://github.com/rust-lang/crates.io-index" 72 | 73 | [[package]] 74 | name = "gcc" 75 | version = "0.3.55" 76 | source = "registry+https://github.com/rust-lang/crates.io-index" 77 | 78 | [[package]] 79 | name = "getrandom" 80 | version = "0.1.11" 81 | source = "registry+https://github.com/rust-lang/crates.io-index" 82 | dependencies = [ 83 | "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", 84 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 85 | "wasi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", 86 | ] 87 | 88 | [[package]] 89 | name = "idna" 90 | version = "0.2.0" 91 | source = "registry+https://github.com/rust-lang/crates.io-index" 92 | dependencies = [ 93 | "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", 94 | "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", 95 | "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", 96 | ] 97 | 98 | [[package]] 99 | name = "itoa" 100 | version = "0.4.4" 101 | source = "registry+https://github.com/rust-lang/crates.io-index" 102 | 
103 | [[package]] 104 | name = "lazy_static" 105 | version = "1.3.0" 106 | source = "registry+https://github.com/rust-lang/crates.io-index" 107 | 108 | [[package]] 109 | name = "libc" 110 | version = "0.2.62" 111 | source = "registry+https://github.com/rust-lang/crates.io-index" 112 | 113 | [[package]] 114 | name = "libsqlite3-sys" 115 | version = "0.16.0" 116 | source = "registry+https://github.com/rust-lang/crates.io-index" 117 | dependencies = [ 118 | "pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", 119 | "vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", 120 | ] 121 | 122 | [[package]] 123 | name = "linked-hash-map" 124 | version = "0.5.2" 125 | source = "registry+https://github.com/rust-lang/crates.io-index" 126 | 127 | [[package]] 128 | name = "log" 129 | version = "0.4.8" 130 | source = "registry+https://github.com/rust-lang/crates.io-index" 131 | dependencies = [ 132 | "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", 133 | ] 134 | 135 | [[package]] 136 | name = "lru-cache" 137 | version = "0.1.2" 138 | source = "registry+https://github.com/rust-lang/crates.io-index" 139 | dependencies = [ 140 | "linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", 141 | ] 142 | 143 | [[package]] 144 | name = "matches" 145 | version = "0.1.8" 146 | source = "registry+https://github.com/rust-lang/crates.io-index" 147 | 148 | [[package]] 149 | name = "memchr" 150 | version = "2.2.1" 151 | source = "registry+https://github.com/rust-lang/crates.io-index" 152 | 153 | [[package]] 154 | name = "percent-encoding" 155 | version = "2.1.0" 156 | source = "registry+https://github.com/rust-lang/crates.io-index" 157 | 158 | [[package]] 159 | name = "pkg-config" 160 | version = "0.3.15" 161 | source = "registry+https://github.com/rust-lang/crates.io-index" 162 | 163 | [[package]] 164 | name = "ppv-lite86" 165 | version = "0.2.5" 166 | source = "registry+https://github.com/rust-lang/crates.io-index" 167 | 168 | [[package]] 169 | name = "preserve" 170 | version = "0.2.0" 171 | dependencies = [ 172 | "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", 173 | "data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 174 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 175 | "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", 176 | "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", 177 | "rusqlite 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)", 178 | "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 179 | "rust-lzma 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 180 | "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", 181 | "serde_derive 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", 182 | "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", 183 | "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 184 | "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", 185 | "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 186 | ] 187 | 188 | [[package]] 189 | name = "proc-macro2" 190 | version = "1.0.1" 191 | source = "registry+https://github.com/rust-lang/crates.io-index" 192 | dependencies = [ 193 | "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", 194 | ] 195 | 196 | [[package]] 197 | name = "quote" 198 | version = "1.0.2" 199 
| source = "registry+https://github.com/rust-lang/crates.io-index" 200 | dependencies = [ 201 | "proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", 202 | ] 203 | 204 | [[package]] 205 | name = "rand" 206 | version = "0.3.23" 207 | source = "registry+https://github.com/rust-lang/crates.io-index" 208 | dependencies = [ 209 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 210 | "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", 211 | ] 212 | 213 | [[package]] 214 | name = "rand" 215 | version = "0.4.6" 216 | source = "registry+https://github.com/rust-lang/crates.io-index" 217 | dependencies = [ 218 | "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 219 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 220 | "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", 221 | "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 222 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 223 | ] 224 | 225 | [[package]] 226 | name = "rand" 227 | version = "0.7.0" 228 | source = "registry+https://github.com/rust-lang/crates.io-index" 229 | dependencies = [ 230 | "getrandom 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", 231 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 232 | "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 233 | "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", 234 | "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", 235 | ] 236 | 237 | [[package]] 238 | name = "rand_chacha" 239 | version = "0.2.1" 240 | source = "registry+https://github.com/rust-lang/crates.io-index" 241 | dependencies = [ 242 | "c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 243 | "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", 244 | ] 245 | 246 | [[package]] 247 | name = "rand_core" 248 | version = "0.3.1" 249 | source = "registry+https://github.com/rust-lang/crates.io-index" 250 | dependencies = [ 251 | "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", 252 | ] 253 | 254 | [[package]] 255 | name = "rand_core" 256 | version = "0.4.2" 257 | source = "registry+https://github.com/rust-lang/crates.io-index" 258 | 259 | [[package]] 260 | name = "rand_core" 261 | version = "0.5.0" 262 | source = "registry+https://github.com/rust-lang/crates.io-index" 263 | dependencies = [ 264 | "getrandom 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", 265 | ] 266 | 267 | [[package]] 268 | name = "rand_hc" 269 | version = "0.2.0" 270 | source = "registry+https://github.com/rust-lang/crates.io-index" 271 | dependencies = [ 272 | "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", 273 | ] 274 | 275 | [[package]] 276 | name = "rdrand" 277 | version = "0.4.0" 278 | source = "registry+https://github.com/rust-lang/crates.io-index" 279 | dependencies = [ 280 | "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", 281 | ] 282 | 283 | [[package]] 284 | name = "redox_syscall" 285 | version = "0.1.56" 286 | source = "registry+https://github.com/rust-lang/crates.io-index" 287 | 288 | [[package]] 289 | name = "remove_dir_all" 290 | version = "0.5.2" 291 | source = "registry+https://github.com/rust-lang/crates.io-index" 292 | dependencies = [ 293 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 294 | 
] 295 | 296 | [[package]] 297 | name = "rusqlite" 298 | version = "0.20.0" 299 | source = "registry+https://github.com/rust-lang/crates.io-index" 300 | dependencies = [ 301 | "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 302 | "fallible-iterator 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", 303 | "fallible-streaming-iterator 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", 304 | "libsqlite3-sys 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", 305 | "lru-cache 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 306 | "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 307 | "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", 308 | ] 309 | 310 | [[package]] 311 | name = "rust-crypto" 312 | version = "0.2.36" 313 | source = "registry+https://github.com/rust-lang/crates.io-index" 314 | dependencies = [ 315 | "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)", 316 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 317 | "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)", 318 | "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", 319 | "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", 320 | ] 321 | 322 | [[package]] 323 | name = "rust-lzma" 324 | version = "0.4.0" 325 | source = "registry+https://github.com/rust-lang/crates.io-index" 326 | dependencies = [ 327 | "pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", 328 | ] 329 | 330 | [[package]] 331 | name = "rustc-serialize" 332 | version = "0.3.24" 333 | source = "registry+https://github.com/rust-lang/crates.io-index" 334 | 335 | [[package]] 336 | name = "ryu" 337 | version = "1.0.0" 338 | source = "registry+https://github.com/rust-lang/crates.io-index" 339 | 340 | [[package]] 341 | name = "serde" 342 | version = "1.0.99" 343 | source = "registry+https://github.com/rust-lang/crates.io-index" 344 | 345 | [[package]] 346 | name = "serde_derive" 347 | version = "1.0.99" 348 | source = "registry+https://github.com/rust-lang/crates.io-index" 349 | dependencies = [ 350 | "proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", 351 | "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", 352 | "syn 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", 353 | ] 354 | 355 | [[package]] 356 | name = "serde_json" 357 | version = "1.0.40" 358 | source = "registry+https://github.com/rust-lang/crates.io-index" 359 | dependencies = [ 360 | "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", 361 | "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 362 | "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", 363 | ] 364 | 365 | [[package]] 366 | name = "smallvec" 367 | version = "0.6.10" 368 | source = "registry+https://github.com/rust-lang/crates.io-index" 369 | 370 | [[package]] 371 | name = "strsim" 372 | version = "0.8.0" 373 | source = "registry+https://github.com/rust-lang/crates.io-index" 374 | 375 | [[package]] 376 | name = "syn" 377 | version = "1.0.4" 378 | source = "registry+https://github.com/rust-lang/crates.io-index" 379 | dependencies = [ 380 | "proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", 381 | "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", 382 | "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", 383 | ] 384 | 
385 | [[package]] 386 | name = "tempfile" 387 | version = "3.1.0" 388 | source = "registry+https://github.com/rust-lang/crates.io-index" 389 | dependencies = [ 390 | "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", 391 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 392 | "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", 393 | "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", 394 | "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", 395 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 396 | ] 397 | 398 | [[package]] 399 | name = "textwrap" 400 | version = "0.11.0" 401 | source = "registry+https://github.com/rust-lang/crates.io-index" 402 | dependencies = [ 403 | "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", 404 | ] 405 | 406 | [[package]] 407 | name = "time" 408 | version = "0.1.42" 409 | source = "registry+https://github.com/rust-lang/crates.io-index" 410 | dependencies = [ 411 | "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", 412 | "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", 413 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 414 | ] 415 | 416 | [[package]] 417 | name = "unicode-bidi" 418 | version = "0.3.4" 419 | source = "registry+https://github.com/rust-lang/crates.io-index" 420 | dependencies = [ 421 | "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", 422 | ] 423 | 424 | [[package]] 425 | name = "unicode-normalization" 426 | version = "0.1.8" 427 | source = "registry+https://github.com/rust-lang/crates.io-index" 428 | dependencies = [ 429 | "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", 430 | ] 431 | 432 | [[package]] 433 | name = "unicode-width" 434 | version = "0.1.6" 435 | source = "registry+https://github.com/rust-lang/crates.io-index" 436 | 437 | [[package]] 438 | name = "unicode-xid" 439 | version = "0.2.0" 440 | source = "registry+https://github.com/rust-lang/crates.io-index" 441 | 442 | [[package]] 443 | name = "url" 444 | version = "2.1.0" 445 | source = "registry+https://github.com/rust-lang/crates.io-index" 446 | dependencies = [ 447 | "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", 448 | "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", 449 | "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 450 | ] 451 | 452 | [[package]] 453 | name = "vcpkg" 454 | version = "0.2.7" 455 | source = "registry+https://github.com/rust-lang/crates.io-index" 456 | 457 | [[package]] 458 | name = "vec_map" 459 | version = "0.8.1" 460 | source = "registry+https://github.com/rust-lang/crates.io-index" 461 | 462 | [[package]] 463 | name = "wasi" 464 | version = "0.5.0" 465 | source = "registry+https://github.com/rust-lang/crates.io-index" 466 | 467 | [[package]] 468 | name = "winapi" 469 | version = "0.3.7" 470 | source = "registry+https://github.com/rust-lang/crates.io-index" 471 | dependencies = [ 472 | "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 473 | "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 474 | ] 475 | 476 | [[package]] 477 | name = "winapi-i686-pc-windows-gnu" 478 | version = "0.4.0" 479 | source = "registry+https://github.com/rust-lang/crates.io-index" 480 | 481 | [[package]] 482 | name = 
"winapi-x86_64-pc-windows-gnu" 483 | version = "0.4.0" 484 | source = "registry+https://github.com/rust-lang/crates.io-index" 485 | 486 | [metadata] 487 | "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" 488 | "checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" 489 | "checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" 490 | "checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101" 491 | "checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" 492 | "checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" 493 | "checksum data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f47ca1860a761136924ddd2422ba77b2ea54fe8cc75b9040804a0d9d32ad97" 494 | "checksum fallible-iterator 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" 495 | "checksum fallible-streaming-iterator 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" 496 | "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" 497 | "checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" 498 | "checksum getrandom 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "fc344b02d3868feb131e8b5fe2b9b0a1cc42942679af493061fc13b853243872" 499 | "checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" 500 | "checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" 501 | "checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" 502 | "checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba" 503 | "checksum libsqlite3-sys 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5e5b95e89c330291768dc840238db7f9e204fd208511ab6319b56193a7f2ae25" 504 | "checksum linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" 505 | "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" 506 | "checksum lru-cache 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" 507 | "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" 508 | "checksum memchr 2.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" 509 | "checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" 510 | "checksum pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c1d2cfa5a714db3b5f24f0915e74fcdf91d09d496ba61329705dda7774d2af" 511 | "checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b" 512 | "checksum proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c5c2380ae88876faae57698be9e9775e3544decad214599c3a6266cca6ac802" 513 | "checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" 514 | "checksum rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" 515 | "checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" 516 | "checksum rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d47eab0e83d9693d40f825f86948aa16eff6750ead4bdffc4ab95b8b3a7f052c" 517 | "checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" 518 | "checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" 519 | "checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" 520 | "checksum rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "615e683324e75af5d43d8f7a39ffe3ee4a9dc42c5c701167a71dc59c3a493aca" 521 | "checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" 522 | "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" 523 | "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" 524 | "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" 525 | "checksum rusqlite 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2a194373ef527035645a1bc21b10dc2125f73497e6e155771233eb187aedd051" 526 | "checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a" 527 | "checksum rust-lzma 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c584b6338d00183ee853f099e2560eeda850925850f90ba35471423958a27ba5" 528 | "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" 529 | "checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997" 530 | "checksum serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)" = 
"fec2851eb56d010dc9a21b89ca53ee75e6528bab60c11e89d38390904982da9f" 531 | "checksum serde_derive 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)" = "cb4dc18c61206b08dc98216c98faa0232f4337e1e1b8574551d5bad29ea1b425" 532 | "checksum serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "051c49229f282f7c6f3813f8286cc1e3323e8051823fce42c7ea80fe13521704" 533 | "checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" 534 | "checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" 535 | "checksum syn 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "c65d951ab12d976b61a41cf9ed4531fc19735c6e6d84a4bb1453711e762ec731" 536 | "checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" 537 | "checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" 538 | "checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" 539 | "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" 540 | "checksum unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "141339a08b982d942be2ca06ff8b076563cbe223d1befd5450716790d44e2426" 541 | "checksum unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7007dbd421b92cc6e28410fe7362e2e0a2503394908f417b68ec8d1c364c4e20" 542 | "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" 543 | "checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" 544 | "checksum vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "33dd455d0f96e90a75803cfeb7f948768c08d70a6de9a8d2362461935698bf95" 545 | "checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" 546 | "checksum wasi 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fd5442abcac6525a045cc8c795aedb60da7a2e5e89c7bf18a0d5357849bb23c7" 547 | "checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" 548 | "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 549 | "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 550 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["fpgaminer "] 3 | description = "Encrypted backup system." 
4 | license = "MIT OR Apache-2.0" 5 | name = "preserve" 6 | readme = "README.md" 7 | repository = "https://github.com/fpgaminer/preserve" 8 | version = "0.2.0" 9 | edition = "2018" 10 | 11 | [dependencies] 12 | clap = "2.33.0" 13 | libc = "0.2.62" 14 | log = { version = "0.4.8", features = ["std"] } 15 | rand = "0.7.0" 16 | rusqlite = "0.20.0" 17 | rust-crypto = "0.2.36" 18 | rust-lzma = "0.4.0" 19 | tempfile = "3.1.0" 20 | time = "0.1.42" 21 | url = "2.1.0" 22 | serde = "1.0.99" 23 | serde_derive = "1.0.99" 24 | serde_json = "1.0.40" 25 | data-encoding = "2.1.2" 26 | 27 | [profile] 28 | 29 | [profile.dev] 30 | debug = true 31 | opt-level = 2 32 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2016] [fpgaminer] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 fpgaminer 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /NOTES.md: -------------------------------------------------------------------------------- 1 | How to diff: 2 | ls -laR --full-time ./ > /tmp/filelist.txt 3 | cd restored 4 | ls -laR --full-time ./ > /tmp/filelist-restored.txt 5 | 6 | This will dump out all the permissions, owners, file sizes, mtimes, etc. Use diff/vimdiff to compare. 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Preserve [![Build Status](https://travis-ci.org/fpgaminer/preserve.svg?branch=master)](https://travis-ci.org/fpgaminer/preserve) # 2 | Preserve is an encrypted backup system written in Rust. All backup data is encrypted, so backups can be stored on untrusted devices/services without exposing any of your data. Creating a backup is simple, and works much like creating a Zip or Tar archive. Deduplication keeps backups space-efficient. 3 | 4 | ## Status 5 | I am actively developing this project, so it is not stable or ready for general use. The code is currently messy and missing many vital features. Follow along if you're interested! 6 | 7 | ## Usage 8 | 9 | 1. Generate a keyfile 10 | 11 | ``` 12 | preserve keygen --keyfile keyfile 13 | ``` 14 | 15 | Make sure to store this keyfile in a safe place. Anyone who has access to this keyfile can read your backups and/or corrupt them. 16 | 17 | 2. Create a backup 18 | 19 | ``` 20 | preserve create --keyfile keyfile --backend file --backend-path /path/to/my/backups/ my-backup-`date +%Y-%m-%d_%H-%M-%S` /home/me/ 21 | ``` 22 | 23 | This will create a backup of everything inside `/home/me/`. The backup will be named something like `my-backup-2016-02-25_11-56-51` and stored in the filesystem at `/path/to/my/backups`. To take advantage of deduplication you should store all your backups in the same place. If you back up multiple machines, you could use an external drive or NAS. If you use the same keyfile for all machines, Preserve will dedup across all of them. 24 | 25 | 3.
List backups 26 | 27 | ``` 28 | preserve list --keyfile keyfile --backend file --backend-path /path/to/my/backups/ 29 | ``` 30 | 31 | 4. Restore a backup 32 | 33 | ``` 34 | preserve restore --keyfile keyfile --backend file --backend-path /path/to/my/backups/ name-of-backup-to-restore /path/to/restore/it/to/ 35 | ``` 36 | 37 | This will restore the backup named `name-of-backup-to-restore`, extracting its contents to `/path/to/restore/it/to/`. 38 | 39 | ## Build 40 | ``` 41 | cargo build 42 | ``` 43 | 44 | ## Test 45 | ``` 46 | cargo test 47 | ``` 48 | 49 | ## Details 50 | It's easiest to understand Preserve by going through how it creates a backup. When you tell Preserve to create a backup, it walks the specified path looking for all files and folders. It collects information about all those files and folders (name, permissions, mtime, size). Then it goes through all the files and reads their contents, 1MB at a time. For each 1MB chunk, it encrypts the chunk using convergent encryption. Convergent encryption is deterministic, so given the same 1MB chunk it will output the same 1MB encrypted block (plus id and mac). Each block also has a small (32-byte) unique identifier associated with it. So after Preserve has finished reading all the chunks of a file, it stores the contents as a list of these unique identifiers, and stores the actual blocks on the backend. When it encounters the same block twice, it has to store the metadata twice, but the actual encrypted data only gets stored once on the backend. This is how Preserve achieves its deduplication. If you create one backup, and then create another of exactly the same data, Preserve won't have to store any new blocks on the backend; it only needs to store a new set of metadata. 51 | 52 | When all files have been traversed, the archive (list of files, directories, and metadata) is serialized to JSON, compressed with XZ, encrypted using a public key, and then stored on the backend. 53 | 54 | Various caches are used to speed this process up. If a file hasn't changed since Preserve last backed it up, then it will pull its metadata and list of content identifiers from cache, so it won't have to re-read the file. 55 |
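To make the chunking and deduplication flow concrete, here is a minimal sketch in Rust. It uses only the standard library; `siv_encrypt` is a hypothetical stand-in for Preserve's keyed convergent encryption (see crypto-spec.md for the real construction), and a `HashMap` stands in for a real backend.

```
use std::collections::HashMap;
use std::fs::File;
use std::io::{self, BufReader, Read};

const CHUNK_SIZE: usize = 1024 * 1024; // Preserve reads file contents 1MB at a time

// Hypothetical stand-in: deterministic (convergent) encryption of one chunk,
// returning (unique 32-byte block id, encrypted block). See crypto-spec.md.
fn siv_encrypt(_chunk: &[u8]) -> ([u8; 32], Vec<u8>) {
    unimplemented!("HMAC-SHA-512-256 SIV + ChaCha20, keyed by the user's Keystore")
}

// Read as much as possible into buf (a plain Read::read may return short reads).
fn read_full<R: Read>(r: &mut R, buf: &mut [u8]) -> io::Result<usize> {
    let mut total = 0;
    while total < buf.len() {
        match r.read(&mut buf[total..])? {
            0 => break,
            n => total += n,
        }
    }
    Ok(total)
}

/// Chunk one file, encrypt each chunk, store blocks in `backend`, and return
/// the file's contents as a list of block ids (what the archive stores).
fn backup_file(path: &str, backend: &mut HashMap<[u8; 32], Vec<u8>>) -> io::Result<Vec<[u8; 32]>> {
    let mut reader = BufReader::new(File::open(path)?);
    let mut buf = vec![0u8; CHUNK_SIZE];
    let mut ids = Vec::new();
    loop {
        let n = read_full(&mut reader, &mut buf)?;
        if n == 0 {
            break;
        }
        let (id, block) = siv_encrypt(&buf[..n]);
        // Deduplication: an identical chunk yields an identical id, so the
        // encrypted block is only ever stored once on the backend.
        backend.entry(id).or_insert(block);
        ids.push(id);
    }
    Ok(ids)
}
```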
-------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | * Clean up TODOs and improve error reporting. 2 | * Test individual components of the backup system (unit testing) 3 | * Add a config option, --dereference, which will handle symlinks by "dereferencing" them. A symlink will become a regular file in the archive with the contents set to the contents of the target. This can be applied either during archive creation, or during extraction (implemented for create, but not restore). 4 | * Have a service that actively tests the backups. Download blocks and archives and check their HMAC. Download an archive, decrypt, and try a block or two. Do this every so often; often enough that everything is probabilistically checked at a reasonable frequency. 5 | * Option to back up to multiple backends 6 | * Clean up crypto-spec.md 7 | * Config file 8 | * Diehard randomness testing 9 | * Clean old entries out of mtime_cache 10 | * Restore file owner/group 11 | * At the top level of the archive, store a table mapping uids/gids to names. Then, during extraction, do a remap. For every entry in the table, check the local system for the given user name or group name. Use that to remap the archive's uid/gid to the local system's uid/gid. 12 | * Add tests for the --one-file-system flag 13 | * During archive creation, after reading all files, sleep for a second and then rescan the metadata on all files. If there are any mismatches, reread those files. This should catch any file modification that the existing file-modification detection scheme misses (due to lag in mtime updates). 14 | * When warning that a symlink was backed up but its link target was not, we should also print the link. 15 | * The Docker-based integration test should do more manipulation between backups. 16 | * The Docker-based integration test script should more clearly indicate whether the test passed or not. 17 | * The Docker-based integration test should be integrated into Travis-CI. 18 | * Add the ability to extract specific files/folders during a restore. 19 | * verbose flag 20 | * Rename the "current_filesystem" variable in the create code; generally re-work how one-file-system works 21 | * The help message for "--exclude" says that it is a required option ... which isn't true. Something weird with clap. 22 | * Maybe use the flexi_logger crate instead of our custom logger? 23 | * Remove comparison functions from newtype and replace them with a constant_time_compare function so it's explicit (also remove hashing, partialord, etc.) 24 | * Add a test that backs up to one backend, and then to another, and then restores from each. This should make sure that the cache mechanism doesn't accidentally cause Preserve to skip uploading blocks. Also we should try copying over half of a backend and making sure it still works after another archive creation and restore; again verifying that the cache doesn't prevent Preserve from correctly backing up. 25 | * Use the failure crate 26 | * Add tests for the diff command. 27 | * Back up the keyfile to the backend(s) (encrypted using HellishKDF) 28 | * Use a progress bar crate 29 | * Refcounting on the backend (need to implement the new backend API) 30 | * Add tests for the verify command. 31 | * Add tests for the list command. 32 | * Should newtype_macros be a procedural macro instead? Seems like it would create cleaner code. 33 | * Replace the rust-crypto dependency -------------------------------------------------------------------------------- /crypto-spec.md: -------------------------------------------------------------------------------- 1 | # Preserve Cryptography 2 | 3 | This document describes the cryptography used by Preserve. 4 | 5 | ## Overview 6 | 7 | The current system provides the following (as long as your keys are safe): 8 | 9 | * The contents of your backups cannot be read by attackers, even if your backups are stolen/leaked. 10 | * The contents of your backups cannot be read by backends, i.e. you do not need to entrust your privacy to a cloud-based backend. 11 | * A malicious backend can corrupt your backups, but because backups are authenticated you'll know if that happens, and Preserve won't restore malicious data (i.e. attackers cannot inject viruses into your backups). 12 | * Deduplication. 13 | 14 | Preserve does, however, have these caveats: 15 | 16 | * It does not hide the length of your data, i.e. the size of your backups and, in some cases, the sizes of your files. 17 | * Backends can see which blocks of encrypted data belong to which backups. A backend can always deduce this information based on your usage patterns, so Preserve makes no effort to hide it.
18 | 19 | These caveats are not unique to Preserve, and are not generally considered dangerous, but are noted for transparency. 20 | 21 | Preserve is not vulnerable to the usual failings of secure deduplication (e.g. confirmation attacks), because the keys used are unique to the user. This is not only a security measure, but also a privacy measure. For example, some cloud services using global convergent encryption schemes can tell what users are storing based on the unique hashes. This is not possible with Preserve; a backend cannot deduce what a user is storing from block hashes. 22 | 23 | Preserve uses the SIV construction to efficiently achieve its goal of secure deduplication. The SIV construction makes encryption of blocks deterministic and provides each with a unique ID that can be used to reference the encrypted data. SIV and its security proof are given in the citation (Deterministic authenticated-encryption). Preserve is thus DAE secure. The SIV construction requires a PRF-secure primitive and an IND$-secure IV-based cipher. In Preserve we use HMAC-SHA-512-256 as the PRF-secure primitive, and ChaCha20 wrapped by HMAC-SHA-512 as the IND$-secure cipher. 24 | 25 | HMAC-SHA-512-256 (which is HMAC-SHA-512 truncated to 256 bits, not HMAC-SHA-512/256) is used because: it's faster on 64-bit platforms than HMAC-SHA-256; it is well-seasoned, unlike potentially better functions like Blake, while still being fast enough; it's a random oracle according to the citation (Merkle-Damgård revisited); and the HMAC construction has been shown to provide additional security when the underlying function fails, so it's a potentially more robust choice compared to SHA-512-256, even though SHA-512-256 has all the same properties. 26 | 27 | ChaCha20 is wrapped by HMAC-SHA-512, using `HMAC-SHA-512(key, IV)` to derive a 256-bit key and 64-bit nonce for the invocation of ChaCha20. Essentially, this turns ChaCha20 into a cipher with a 256-bit nonce. This is used because the usual ChaCha20 cipher only accepts a 64-bit nonce, while our SIV implementation calls for 256 bits. Reasons why we didn't use something else: XChaCha20 is a commonly used extension of ChaCha20 and derives its security straightforwardly from the XSalsa20 paper, but it only has a 192-bit nonce. 192 bits *might* be enough; I would need to review the security proof for the SIV construction in depth to know for sure how the security margin is affected by reducing the nonce space. An XXChaCha20 primitive could be invented (a three-layer cascade), but this requires studying the XSalsa20 security proof in depth to see if it covers the three-layer case. Both options are likely secure, but require additional scrutiny (by myself and anyone reviewing Preserve's security). In contrast, we know for sure that HMAC-SHA-512-wrapped ChaCha20 fulfills the requirements, and we already use HMAC-SHA-512 elsewhere. 28 |
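For illustration, here is a minimal sketch of that PRF (HMAC-SHA-512 truncated to 256 bits) in Rust. It assumes the RustCrypto `hmac` and `sha2` crates; Preserve itself currently depends on the older rust-crypto crate, so this is illustrative rather than the project's actual code.

```
use hmac::{Hmac, Mac};
use sha2::Sha512;

/// HMAC-SHA-512-256: HMAC-SHA-512, truncated to its first 256 bits.
fn hmac_sha512_256(key: &[u8], data: &[u8]) -> [u8; 32] {
    let mut mac = Hmac::<Sha512>::new_from_slice(key)
        .expect("HMAC accepts keys of any length");
    mac.update(data);
    let full = mac.finalize().into_bytes(); // 64-byte HMAC-SHA-512 output
    let mut out = [0u8; 32];
    out.copy_from_slice(&full[..32]); // truncate to 256 bits
    out
}
```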
"Deterministic authenticated-encryption: A provable-security treatment of the keywrap problem." Journal of Applied Sciences 8.21 (1996): pp-1. 37 | 38 | 39 | 40 | ## Primitives 41 | 42 | * HMAC-SHA-512 43 | * HMAC-SHA-512-256 44 | * ChaCha20 45 | * scrypt 46 | * PBKDF2-SHA-256 47 | * PBKDF2-SHA-512 48 | * SHA-512 49 | * SHA-256 50 | 51 | 52 | 53 | ## Keys 54 | 55 | 1024-bit keys are used because keying material here is "free" and they are the exact size that HMAC-SHA-512 ends up using. 56 | 57 | ``` 58 | SivEncryptionKeys: 59 | * siv_key: 1024-bits 60 | * kdf_key: 1024-bits 61 | ``` 62 | 63 | 64 | 65 | ## Functions 66 | 67 | ### SivEncrypt 68 | `aad` is Additional Authenticated Data. AAD is not included in the resulting ciphertext, but it is used as part of the authentication and thus SIV generation. The same plaintext will encrypt differently if the AAD is different. AAD is useful, for example, for associating an Archive's Metadata with an ArchiveId. In this manner an archive's components cannot be mixed up, otherwise we would detect an authentication failure. 69 | 70 | The returned SIV can be treated as a unique, deterministic identifier (ID) for the (aad, plaintext) pair. The ID does not need to be secret. 71 | 72 | ``` 73 | SivEncrypt (keys: SivEncyptionKeys, aad: [u8], plaintext: [u8]) -> ([u8; 32], [u8]) 74 | mac_data = Encode (a=aad, b=plaintext) 75 | siv = HMAC-SHA-512-256 (key=keys.siv_key, data=mac_data) 76 | ciphertext = Cipher (key=keys.kdf_key, nonce=siv, data=plaintext) 77 | 78 | return siv, ciphertext 79 | ``` 80 | 81 | ### SivDecrypt 82 | ``` 83 | SivDecrypt (keys: SivEncryptionKeys, siv: [u8; 32], aad: [u8], ciphertext: [u8]) -> [u8] 84 | plaintext = Cipher (key=keys.kdf_key, nonce=siv, data=ciphertext) 85 | mac_data = Encode (a=aad, b=plaintext) 86 | expected_siv = HMAC-SHA-512-256 (key=keys.siv_key, data=mac_data) 87 | assert!(constant_time_eq (siv, expected_siv)) 88 | 89 | return plaintext 90 | ``` 91 | 92 | ### PassphraseEncrypt 93 | ``` 94 | PassphraseEncrypt (passphrase: String, plaintext: [u8]) -> [u8] 95 | salt = csrandom(32) 96 | params = time_scrypt (1 hour) 97 | keys = scrypt (params, salt, passphrase) 98 | siv, ciphertext = SivEncrypt (keys, salt || params, plaintext) 99 | 100 | return salt || params || siv || ciphertext 101 | ``` 102 | 103 | ### PassphraseDecrypt 104 | It's important to sanity check the params. An attacker could, for example, give us parameters which tell us to run scrypt for several years, use all our RAM, etc. Though this is not dangerous, it is a DoS vector. 105 | 106 | ``` 107 | PassphraseDecrypt (passphrase: String, sealed_data: [u8]) -> [u8] 108 | salt, params, siv, ciphertext = sealed_data 109 | sanity_check_params (params) 110 | keys = scrypt (params, salt, passphrase) 111 | plaintext = SivDecrypt (keys, siv, salt || params, ciphertext) 112 | 113 | return plaintext 114 | ``` 115 | 116 | ### Cipher 117 | `Cipher` is symmetrical; it is both the encryption and decryption function. It behaves as an IND$-secure cipher with a 1024-bit key and 256-bit nonce. 118 | 119 | ``` 120 | Cipher (key: [u8; 128], nonce: [u8; 32], data: [u8]) 121 | chacha_key, chacha_nonce = HMAC-SHA-512 (key, nonce).split (32) 122 | 123 | return ChaCha20 (chacha_key, chacha_nonce[:8], data) 124 | ``` 125 | 126 | ### Encode 127 | Uniquely encodes the AAD and plaintext for MAC calculation. 128 | 129 | For all `A`, `B`, `C`, and `D` where `(A, B) != (C, D)` it is true that `Encode(A, B) != Encode(C, D)`. 
125 |
126 | ### Encode
127 | Uniquely encodes the AAD and plaintext for MAC calculation.
128 |
129 | For all `A`, `B`, `C`, and `D` where `(A, B) != (C, D)`, it is true that `Encode(A, B) != Encode(C, D)`.
130 |
131 | ```
132 | Encode (a: [u8], b: [u8])
133 |     return a || b || le64encode (a.length) || le64encode (b.length)
134 | ```
135 |
136 |
137 |
138 | ## Block
139 |
140 | ### Encryption
141 |
142 | Given the plaintext for a block, encryption is as follows:
143 |
144 | ```
145 | BlockId, EncryptedBlock = SivEncrypt (Keystore.block, [], Block)
146 | ```
147 |
148 | Store `BlockId = EncryptedBlock` in the backend. Reference the block using `BlockId` in the archive.
149 |
150 |
151 | ### Decryption
152 |
153 | Decryption is as follows (after retrieving BlockId and EncryptedBlock from the backend):
154 |
155 | ```
156 | Block = SivDecrypt (Keystore.block, BlockId, [], EncryptedBlock)
157 | ```
158 |
159 |
160 | ### Notes
161 |
162 | Our encryption scheme ensures that, given the same Block and Keystore, BlockId and EncryptedBlock will always be the same, allowing deduplication.
163 |
164 |
165 |
166 | ## Archive
167 |
168 | ### Encryption
169 |
170 | ```
171 | ArchiveId, EncryptedName = SivEncrypt (Keystore.archive_name, [], Name)
172 | BlocklistId, _ = SivEncrypt (Keystore.archive_blocklist, ArchiveId || Blocklist, [])
173 | MetadataId, EncryptedMetadata = SivEncrypt (Keystore.archive_metadata, ArchiveId, Metadata)
174 | ```
175 |
176 | Use `ArchiveId` to refer to the archive on the backend. Store `ArchiveId = EncryptedName, BlocklistId, Blocklist, MetadataId, EncryptedMetadata` on the backend.
177 |
178 |
179 | ### Decryption
180 |
181 | ```
182 | Name = SivDecrypt (Keystore.archive_name, ArchiveId, [], EncryptedName)
183 | _ = SivDecrypt (Keystore.archive_blocklist, BlocklistId, ArchiveId || Blocklist, [])
184 | Metadata = SivDecrypt (Keystore.archive_metadata, MetadataId, ArchiveId, EncryptedMetadata)
185 | ```
186 |
187 |
188 | ### Notes
189 |
190 | The Blocklist is left in plaintext so the backend can read it and use it for refcounting. The BlockIds themselves are opaque and don't reveal any sensitive information, other than which blocks are associated with which archives (which the backend could infer from usage patterns regardless).
191 |
192 | Archives are stored in pieces (EncryptedName, Blocklist, EncryptedMetadata, etc.) so the backend can return any piece when asked. For example, when asked to list all the archives, the backend can just return a list of EncryptedNames.
193 |
194 | It is important to run `SivDecrypt` on the BlocklistId to authenticate the Blocklist.
195 |
196 |
197 |
198 | ## Keystore
199 |
200 | A Keystore contains all the keys needed to encrypt and decrypt backups. A Keystore is derived from a 1024-bit MasterKey. We use this derivation scheme so that we can easily add other derived keys to the Keystore in later versions if necessary.
201 |
202 | The Keystore is derived from the MasterKey using `PBKDF2-SHA-512 (password=MasterKey, salt='', iterations=1, length=*)`, where length depends on the amount of keying material the Keystore needs. It is important to note that `PBKDF2(length=100)` is equal to `PBKDF2(length=200)[..100]` as long as the other parameters are the same; this prefix property is what allows us to add new keys to the Keystore later.
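
As an illustration of this derivation, here is a sketch using the `rust-crypto` `pbkdf2` routine that `src/keystore.rs` imports; the helper name and sizes are assumptions, not the project's API:

```
use crypto::hmac::Hmac;
use crypto::pbkdf2::pbkdf2;
use crypto::sha2::Sha512;

// Illustrative: stretch the MasterKey into `len` bytes of keystore material.
// Thanks to PBKDF2's prefix property, requesting a larger `len` in a later
// version leaves all previously derived bytes (and thus keys) unchanged.
fn derive_keystore_material(master_key: &[u8; 128], len: usize) -> Vec<u8> {
    let mut material = vec![0u8; len];
    let mut prf = Hmac::new(Sha512::new(), master_key);
    pbkdf2(&mut prf, b"", 1, &mut material);
    material
}
```

The resulting bytes would then be split, in order, into the `SivEncryptionKeys` listed in the Keystore layout below.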
203 |
204 | The MasterKey can be encrypted using a passphrase and stored on one or several backends. Encryption uses the scrypt KDF with parameters tuned to require about an hour on an average computer. This hellishly difficult KDF is used because the passphrase is rarely needed (only during recovery on a new computer) and it provides exceptional security in the case where the encrypted MasterKey is leaked. This makes it safer for MasterKeys to be stored on backends, which allows a more convenient system (users can fully recover with just access to a backend and their passphrase).
205 |
206 | Preserve is expected to keep a decrypted copy of the Keystore locally, so backups can be made without the user's passphrase.
207 |
208 | The Keystore has a separate set of encryption keys for every type of object that gets encrypted (Blocks, Archive names, etc.). Keying material is "free", so we might as well. It also means that the set of IDs for each type of object is different, so we don't accidentally mix up data.
209 |
210 | ```
211 | Keystore:
212 |     block: SivEncryptionKeys
213 |     archive_name: SivEncryptionKeys
214 |     archive_blocklist: SivEncryptionKeys
215 |     archive_metadata: SivEncryptionKeys
216 | ```
217 |
218 |
219 | ### Encryption
220 |
221 | ```
222 | EncryptedMasterKey = PassphraseEncrypt (Passphrase, MasterKey)
223 | ```
224 |
225 | Store `EncryptedMasterKey`.
226 |
227 |
228 | ### Decryption
229 |
230 | ```
231 | MasterKey = PassphraseDecrypt (Passphrase, EncryptedMasterKey)
232 | ```
233 |
234 |
--------------------------------------------------------------------------------
/src/archive.rs:
--------------------------------------------------------------------------------
1 | use crate::keystore::{BlockId, KeyStore, ArchiveId, EncryptedArchiveName, EncryptedArchiveMetadata};
2 | use lzma;
3 | use crate::error::*;
4 | use serde_derive::{Serialize, Deserialize};
5 |
6 |
7 | #[derive(Serialize, Deserialize, PartialEq, Clone)]
8 | pub struct File {
9 |     /// Path, relative to the archive
10 |     pub path: String,
11 |     /// true if this is a directory, false if it's a file
12 |     pub is_dir: bool,
13 |     /// If specified, then this File is a symlink with the link path specified
14 |     pub symlink: Option<String>,
15 |     /// If specified, this is a unique id for identifying all the links to a hardlink
16 |     pub hardlink_id: Option<u64>,
17 |     /// File mode (right now, it should just be permissions, since only directories and regular files are archived)
18 |     pub mode: u32,
19 |     /// Modification time (combine with mtime_nsec)
20 |     pub mtime: i64,
21 |     pub mtime_nsec: i64,
22 |     /// User id
23 |     pub uid: u32,
24 |     /// Group id
25 |     pub gid: u32,
26 |     /// File size
27 |     pub size: u64,
28 |     /// Data blocks (list of block ids)
29 |     pub blocks: Vec<BlockId>,
30 | }
31 |
32 |
33 | /// An archive has some metadata, but it is primarily just a list of files.
34 | /// While the original filesystem was likely a file tree, we squash it to a simple list, since dealing
35 | /// with it as a tree would require lots of extra, nasty code.
36 | /// The list is ordered such that folders are listed before the children inside of them, making restore
37 | /// easy.
38 | #[derive(Serialize, Deserialize)] 39 | pub struct Archive { 40 | pub version: u32, 41 | pub name: String, 42 | pub original_path: String, 43 | pub files: Vec, 44 | } 45 | 46 | 47 | impl Archive { 48 | pub fn encrypt(self, keystore: &KeyStore) -> Result<(ArchiveId, EncryptedArchiveName, EncryptedArchiveMetadata)> { 49 | let (archive_id, encrypted_name) = keystore.encrypt_archive_name(&self.name); 50 | 51 | let encoded = serde_json::to_vec(&self).expect("internal error"); // Serde shouldn't fail 52 | let compressed = lzma::compress(&encoded, 9 | lzma::EXTREME_PRESET).expect("internal error"); // Compression shouldn't fail 53 | let encrypted_archive = keystore.encrypt_archive_metadata(&archive_id, &compressed); 54 | 55 | Ok((archive_id, encrypted_name, encrypted_archive)) 56 | } 57 | 58 | pub fn decrypt(archive_id: &ArchiveId, encrypted_archive: &EncryptedArchiveMetadata, keystore: &KeyStore) -> Result { 59 | let compressed = keystore.decrypt_archive_metadata(archive_id, encrypted_archive)?; 60 | let decompressed = lzma::decompress(&compressed).map_err(|_| Error::CorruptArchiveFailedDecompression)?; 61 | serde_json::from_slice(&decompressed).map_err(|_| Error::CorruptArchiveBadJson) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/backend/file.rs: -------------------------------------------------------------------------------- 1 | use crate::backend::Backend; 2 | use crate::keystore::{ArchiveId, EncryptedArchiveName, EncryptedArchiveMetadata, EncryptedBlock, BlockId}; 3 | use std::path::{Path, PathBuf}; 4 | use std::io::{Read, Write}; 5 | use std::fs::{self, OpenOptions}; 6 | use rand::rngs::OsRng; 7 | use rand::Rng; 8 | use std::os::unix::fs::{MetadataExt, PermissionsExt}; 9 | use std::str::FromStr; 10 | use crate::error::*; 11 | 12 | 13 | pub struct FileBackend { 14 | backup_dir: PathBuf, 15 | } 16 | 17 | impl FileBackend { 18 | pub fn new>(backup_dir: P) -> FileBackend { 19 | FileBackend { 20 | backup_dir: backup_dir.as_ref().to_path_buf(), 21 | } 22 | } 23 | 24 | fn safely_write_file>(&self, destination: P, data: &[u8]) -> Result<()> { 25 | // First, write to a temporary file. 26 | let temppath = { 27 | let tempname: String = OsRng.sample_iter(&rand::distributions::Alphanumeric).take(32).collect(); 28 | let temppath = self.backup_dir.join("temp"); 29 | fs::create_dir_all(&temppath).unwrap_or(()); 30 | temppath.join(tempname) 31 | }; 32 | 33 | { 34 | let mut file = OpenOptions::new().write(true).create(true).open(&temppath)?; 35 | 36 | file.write_all(data)?; 37 | } 38 | 39 | // Archives and Blocks should be stored as world readonly 40 | fs::set_permissions(&temppath, PermissionsExt::from_mode(0o444))?; 41 | 42 | // Ensure that temppath and the destination are both on the same device so that rename 43 | // below is an atomic move operation, rather than a copy. 44 | { 45 | let temppath_metadata = temppath.metadata()?; 46 | let destination_parent = destination.as_ref().parent().ok_or(Error::BackendOnDifferentDevices)?; 47 | let destination_parent_metadata = destination_parent.metadata()?; 48 | if temppath_metadata.dev() != destination_parent_metadata.dev() { 49 | return Err(Error::BackendOnDifferentDevices); 50 | } 51 | } 52 | 53 | // Then move the file to its final destination. This avoids any truncation in case of early 54 | // termination/crash. 
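// Note: rename(2) is only atomic when the source and destination are on the
// same filesystem, which is exactly what the device check above enforces.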
55 | fs::rename(temppath, destination)?; 56 | 57 | Ok(()) 58 | } 59 | } 60 | 61 | impl Backend for FileBackend { 62 | fn block_exists(&mut self, id: &BlockId) -> Result { 63 | let block_id = id.to_string(); 64 | let dir1 = &block_id[0..2]; 65 | let dir2 = &block_id[2..4]; 66 | 67 | let path = self.backup_dir.join("blocks").join(dir1).join(dir2).join(&block_id); 68 | 69 | Ok(path.exists()) 70 | } 71 | 72 | fn store_block(&mut self, id: &BlockId, data: &EncryptedBlock) -> Result<()> { 73 | let block_id = id.to_string(); 74 | let dir1 = &block_id[0..2]; 75 | let dir2 = &block_id[2..4]; 76 | 77 | let path = { 78 | let path = self.backup_dir.join("blocks").join(dir1).join(dir2); 79 | fs::create_dir_all(&path).unwrap_or(()); 80 | path.join(&block_id) 81 | }; 82 | 83 | if path.exists() { 84 | return Ok(()); 85 | } 86 | 87 | self.safely_write_file(path, &data.0) 88 | } 89 | 90 | fn fetch_block(&mut self, id: &BlockId) -> Result { 91 | let block_id = id.to_string(); 92 | let dir1 = &block_id[0..2]; 93 | let dir2 = &block_id[2..4]; 94 | 95 | let path = self.backup_dir.join("blocks").join(dir1).join(dir2).join(&block_id); 96 | let mut file = fs::File::open(path)?; 97 | 98 | let mut ciphertext = Vec::::new(); 99 | 100 | file.read_to_end(&mut ciphertext)?; 101 | 102 | Ok(EncryptedBlock(ciphertext)) 103 | } 104 | 105 | fn fetch_archive(&mut self, id: &ArchiveId) -> Result { 106 | let path = self.backup_dir.join("archives").join(format!("{}.metadata", id.to_string())); 107 | 108 | let data = fs::read(path)?; 109 | 110 | Ok(EncryptedArchiveMetadata(data)) 111 | } 112 | 113 | fn store_archive(&mut self, id: &ArchiveId, name: &EncryptedArchiveName, data: &EncryptedArchiveMetadata) -> Result<()> { 114 | let name_path = self.backup_dir.join("archives").join(format!("{}.name", id.to_string())); 115 | let metadata_path = self.backup_dir.join("archives").join(format!("{}.metadata", id.to_string())); 116 | fs::create_dir_all(&self.backup_dir.join("archives")).unwrap_or(()); 117 | 118 | // TODO: Right now there is a race condition here. This will be fixed in the future when we add a SQLite database for managing refcounts and other atomic things. 119 | if name_path.exists() { 120 | return Err(Error::ArchiveNameConflict); 121 | } 122 | 123 | self.safely_write_file(name_path, &name.0)?; 124 | self.safely_write_file(metadata_path, &data.0) 125 | } 126 | 127 | fn list_archives(&mut self) -> Result> { 128 | let mut archives = Vec::new(); 129 | 130 | for entry in fs::read_dir(self.backup_dir.join("archives"))? 
{ 131 | let path = entry?.path(); 132 | 133 | let extension = path.extension().ok_or(Error::InvalidArchiveId)?; 134 | if extension != "name" { 135 | continue; 136 | } 137 | 138 | let filename = path.file_stem().ok_or(Error::InvalidArchiveId)?; 139 | let filename_str = filename.to_str().ok_or(Error::InvalidArchiveId)?; 140 | let archive_id = ArchiveId::from_str(filename_str).map_err(|_| Error::InvalidArchiveId)?; 141 | let data = fs::read(path)?; 142 | let encrypted_archive_name = EncryptedArchiveName(data); 143 | 144 | archives.push((archive_id, encrypted_archive_name)); 145 | } 146 | 147 | Ok(archives) 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /src/backend/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::keystore::{ArchiveId, EncryptedArchiveName, EncryptedArchiveMetadata, EncryptedBlock, BlockId}; 2 | use crate::error::*; 3 | use url::Url; 4 | 5 | pub mod file; 6 | 7 | pub use crate::backend::file::FileBackend; 8 | 9 | 10 | pub trait Backend { 11 | fn block_exists(&mut self, id: &BlockId) -> Result; 12 | fn store_block(&mut self, id: &BlockId, data: &EncryptedBlock) -> Result<()>; 13 | fn fetch_block(&mut self, id: &BlockId) -> Result; 14 | 15 | fn store_archive(&mut self, id: &ArchiveId, name: &EncryptedArchiveName, data: &EncryptedArchiveMetadata) -> Result<()>; 16 | fn fetch_archive(&mut self, id: &ArchiveId) -> Result; 17 | fn list_archives(&mut self) -> Result>; 18 | } 19 | 20 | 21 | /// Given a backend path, return a Box'd Backend. 22 | pub fn backend_from_backend_path(path: &str) -> Result> { 23 | let url = Url::parse(path).map_err(|_| Error::BadBackendPath("Given backend path could not be understood.".to_string()))?; 24 | 25 | match url.scheme() { 26 | "file" => Ok(Box::new(FileBackend::new(url.path()))), 27 | e => return Err(Error::BadBackendPath(format!("Unknown backend: {}", e))), 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/cmds/create/mod.rs: -------------------------------------------------------------------------------- 1 | use rusqlite::types::ToSql; 2 | use crate::keystore::{KeyStore, BlockId}; 3 | use std::fs; 4 | use std::io::{Read, BufReader}; 5 | use std::path::{Path, PathBuf}; 6 | use std::os::unix::fs::MetadataExt; 7 | use std::string::ToString; 8 | use crate::backend::{self, Backend}; 9 | use crate::archive::{self, Archive}; 10 | use rusqlite; 11 | use std::collections::{HashSet, HashMap}; 12 | use std::env; 13 | use clap::ArgMatches; 14 | use crate::error::*; 15 | use log::{warn, error, info, debug}; 16 | 17 | 18 | pub fn execute(args: &ArgMatches) { 19 | let mut config = Config::default(); 20 | let args_keyfile = args.value_of("keyfile").expect("internal error"); 21 | let args_backend = args.value_of("backend").expect("internal error"); 22 | let backup_name = args.value_of("NAME").expect("internal error"); 23 | let target_directory = Path::new(args.value_of("PATH").expect("internal error")); 24 | let exclude_paths: Vec<&str> = args.values_of("exclude").unwrap_or(clap::Values::default()).collect(); 25 | 26 | config.dereference_symlinks = args.is_present("dereference"); 27 | config.one_file_system = args.is_present("one-file-system"); 28 | 29 | let keystore = match KeyStore::load_from_path(args_keyfile) { 30 | Ok(keystore) => keystore, 31 | Err(err) => { 32 | error!("Unable to load keyfile: {}", err); 33 | return; 34 | } 35 | }; 36 | 37 | let mut backend = match 
backend::backend_from_backend_path(args_backend) { 38 | Ok(backend) => backend, 39 | Err(err) => { 40 | error!("Unable to load backend: {}", err); 41 | return; 42 | } 43 | }; 44 | 45 | // Build archive 46 | let archive = { 47 | let mut builder = match ArchiveBuilder::new(config, &target_directory, &mut *backend, &keystore) { 48 | Ok(builder) => builder, 49 | Err(err) => { 50 | error!("There was a problem initializing the archive builder: {}", err); 51 | return; 52 | }, 53 | }; 54 | 55 | // Add user specified excludes 56 | for path in exclude_paths { 57 | builder.path_ignore_list.insert(PathBuf::from(path)); 58 | } 59 | 60 | info!("Gathering list of files..."); 61 | match builder.walk() { 62 | Ok(_) => (), 63 | Err(err) => { 64 | error!("{}", err); 65 | return; 66 | } 67 | } 68 | info!("Reading files..."); 69 | match builder.read_files() { 70 | Ok(_) => (), 71 | Err(Error::Sqlite(err)) => { 72 | error!("There was a problem accessing the cache database: {}", err); 73 | return; 74 | } 75 | Err(err) => { 76 | error!("There was a problem while reading the files: {}", err); 77 | return; 78 | } 79 | } 80 | builder.warn_about_missing_symlinks(); 81 | builder.warn_about_missing_hardlinks(); 82 | 83 | match builder.create_archive(&backup_name) { 84 | Ok(archive) => archive, 85 | Err(err) => { 86 | error!("{}", err); 87 | return; 88 | } 89 | } 90 | }; 91 | 92 | info!("Writing archive..."); 93 | let (archive_id, encrypted_archive_name, encrypted_archive) = match archive.encrypt(&keystore) { 94 | Ok(x) => x, 95 | Err(err) => { 96 | error!("There was a problem encrypting the backup: {}", err); 97 | return; 98 | } 99 | }; 100 | match backend.store_archive(&archive_id, &encrypted_archive_name, &encrypted_archive) { 101 | Ok(_) => (), 102 | Err(err) => { 103 | error!("There was a problem storing the archive: {}", err); 104 | return; 105 | } 106 | } 107 | info!("Backup created successfully"); 108 | } 109 | 110 | 111 | #[derive(Default)] 112 | struct Config { 113 | /// If true, follow symlinks. 114 | /// If false, symlinks are saved as symlinks in the archive. 115 | dereference_symlinks: bool, 116 | 117 | /// If true, we will skip all files/directories that reside on other filesystems. 118 | one_file_system: bool, 119 | } 120 | 121 | /// Used to uniquely identify a file during backup creation, so we can 122 | /// easily skip certain files (like our cache databases). 123 | #[derive(Eq, PartialEq, Hash)] 124 | struct FileIdentifier { 125 | devid: u64, 126 | inode: u64, 127 | } 128 | 129 | struct HardLink { 130 | /// How many links exist to this inode, on the user's system. 131 | expected_links: u64, 132 | /// We assign a unique id to each hardlink during backup creation, 133 | /// which, in the archive, is then assigned to each file involved in the hardlink. 134 | /// We could use (devid, inode), but that seems wasteful and perhaps non-portable. 135 | /// So we'll just assign our own id, unique within the archive, using a simple counter. 136 | id: u64, 137 | /// Used for error reporting; just one of the paths that points to this inode. 
138 | example_path: PathBuf, 139 | } 140 | 141 | // Wrap File so we can keep track of a few extra things while building the archive 142 | struct ArchiveBuilderFile { 143 | file: archive::File, 144 | missing: bool, 145 | canonical_path: Option, 146 | } 147 | 148 | struct ArchiveBuilder<'a> { 149 | config: Config, 150 | base_path: PathBuf, 151 | hardlink_map: HashMap, 152 | last_hardlink_id: u64, 153 | total_size: u64, 154 | /// Any filesystem entries with a matching devid+inode will be ignored. 155 | inode_ignore_list: HashSet, 156 | /// Any filesystem entries with a matching path will be ignored. 157 | /// Currently only checks directories. 158 | path_ignore_list: HashSet, 159 | files: Vec, 160 | backend: &'a mut dyn Backend, 161 | keystore: &'a KeyStore, 162 | } 163 | 164 | impl<'a> ArchiveBuilder<'a> { 165 | fn new>(config: Config, base_path: P, backend: &'a mut dyn Backend, keystore: &'a KeyStore) -> Result> { 166 | let base_path = if base_path.as_ref().is_relative() { 167 | env::current_dir()?.join(base_path) 168 | } else { 169 | PathBuf::from(base_path.as_ref()) 170 | }; 171 | 172 | let mut inode_ignore_list = HashSet::new(); 173 | 174 | // Don't archive our cache file 175 | if let Ok(metadata) = Path::new("cache.sqlite").metadata() { 176 | inode_ignore_list.insert(FileIdentifier { 177 | devid: metadata.dev(), 178 | inode: metadata.ino(), 179 | }); 180 | } 181 | 182 | let mut path_ignore_list = HashSet::new(); 183 | 184 | // TODO: Make it possible to disable these with a command line flag 185 | path_ignore_list.insert(PathBuf::from("/proc")); 186 | path_ignore_list.insert(PathBuf::from("/sys")); 187 | path_ignore_list.insert(PathBuf::from("/dev")); 188 | path_ignore_list.insert(PathBuf::from("/run")); 189 | path_ignore_list.insert(PathBuf::from("/tmp")); 190 | 191 | Ok(ArchiveBuilder { 192 | config, 193 | base_path, 194 | hardlink_map: HashMap::new(), 195 | total_size: 0, 196 | last_hardlink_id: 0, 197 | inode_ignore_list, 198 | path_ignore_list, 199 | files: Vec::new(), 200 | backend, 201 | keystore, 202 | }) 203 | } 204 | 205 | fn open_cache_db(&self) -> Result { 206 | let db = rusqlite::Connection::open("cache.sqlite")?; 207 | 208 | db.execute("CREATE TABLE IF NOT EXISTS mtime_cache ( 209 | path TEXT NOT NULL, 210 | mtime INTEGER NOT NULL, 211 | mtime_nsec INTEGER NOT NULL, 212 | size INTEGER NOT NULL, 213 | blocks TEXT NOT NULL 214 | )", rusqlite::NO_PARAMS)?; 215 | 216 | db.execute("CREATE INDEX IF NOT EXISTS idx_mtime_cache_path_mtime_size ON mtime_cache (path, mtime, mtime_nsec, size);", rusqlite::NO_PARAMS)?; 217 | db.execute("CREATE UNIQUE INDEX IF NOT EXISTS idx_mtime_cache_path ON mtime_cache (path);", rusqlite::NO_PARAMS)?; 218 | 219 | Ok(db) 220 | } 221 | 222 | // Walk the file tree from self.base_path, gathering metadata about all the files 223 | fn walk(&mut self) -> Result<()> { 224 | self.files = Vec::new(); 225 | self.total_size = 0; 226 | 227 | let base_path = self.base_path.clone(); 228 | let base_path_metadata = self.base_path.metadata()?; 229 | let current_filesystem = if self.config.one_file_system { Some(base_path_metadata.dev()) } else { None }; 230 | let mut unscanned_paths: Vec = Vec::new(); 231 | 232 | unscanned_paths.extend(self.list_directory_children(&base_path)); 233 | 234 | while let Some(path) = unscanned_paths.pop() { 235 | let file = match self.read_file_metadata(path, current_filesystem) { 236 | Some(file) => file, 237 | None => continue, 238 | }; 239 | 240 | if file.file.symlink.is_none() && file.file.is_dir { 241 | 
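// This entry is a real directory (not recorded as a symlink), so queue
// its children for scanning.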
unscanned_paths.extend(self.list_directory_children(base_path.join(&file.file.path))); 242 | } 243 | 244 | self.total_size += file.file.size; 245 | self.files.push(file); 246 | } 247 | 248 | Ok(()) 249 | } 250 | 251 | fn create_archive(&self, name: &str) -> Result { 252 | let files: Vec = self.files.iter().map(|file| file.file.clone()).collect(); 253 | 254 | Ok(Archive { 255 | version: 0x00000001, 256 | name: name.to_owned(), 257 | original_path: self.base_path.canonicalize()?.to_string_lossy().to_string(), 258 | files: files, 259 | }) 260 | } 261 | 262 | // Given a path, read the metadata for the file, handle symlinks, hardlinks, etc and return an ArchiveBuilderFile or None if a problem was encountered. 263 | fn read_file_metadata>(&mut self, path: P, current_filesystem: Option) -> Option { 264 | // First, let's see if it's a symlink 265 | let symlink_metadata = match path.as_ref().symlink_metadata() { 266 | Ok(metadata) => metadata, 267 | Err(err) => { 268 | warn!("Unable to read metadata for '{}'. It will not be included in the archive. The following error was received: {}", path.as_ref().display(), err); 269 | return None 270 | }, 271 | }; 272 | 273 | // Skip files, symlinks, etc that don't reside on the current filesystem we're walking, if --one-file-system is enabled 274 | if let Some(current_filesystem) = current_filesystem { 275 | if symlink_metadata.dev() != current_filesystem { 276 | warn!("'{}' is being skipped because of --one-file-system.", path.as_ref().display()); 277 | return None 278 | } 279 | } 280 | 281 | if self.should_ignore(&symlink_metadata, path.as_ref()) { 282 | warn!("'{}' is being skipped because it is ignored.", path.as_ref().display()); 283 | return None; 284 | } 285 | 286 | // If we encounter a symlink, and we aren't dereferencing, then we will 287 | // store information about the symlink, and all metadata will be about 288 | // the symlink (not the file/folder it points to). 289 | // If we derference the symlink then all metadata will be about the 290 | // file/folder the symlink points to. 291 | let (metadata, symlink_path) = if symlink_metadata.file_type().is_symlink() && !self.config.dereference_symlinks { 292 | let symlink_path: String = match fs::read_link(path.as_ref()) { 293 | Ok(symlink_path) => match symlink_path.to_str() { 294 | Some(symlink_path_str) => symlink_path_str.to_string(), 295 | None => { 296 | warn!("Unable to read symlink for '{}' as UTF-8 string. It will not be included in the archive.", path.as_ref().display()); 297 | return None 298 | }, 299 | }, 300 | Err(err) => { 301 | warn!("Unable to read symlink for '{}'. It will not be included in the archive. The following error was received: {}", path.as_ref().display(), err); 302 | return None 303 | }, 304 | }; 305 | 306 | (path.as_ref().symlink_metadata(), Some(symlink_path)) 307 | } else { 308 | (path.as_ref().metadata(), None) 309 | }; 310 | 311 | let metadata = match metadata { 312 | Ok(metadata) => metadata, 313 | Err(err) => { 314 | warn!("Unable to read metadata for '{}'. It will not be included in the archive. 
The following error was received: {}", path.as_ref().display(), err); 315 | return None; 316 | }, 317 | }; 318 | 319 | // Skip files, symlinks, etc that don't reside on the current filesystem we're walking, if --one-file-system is enabled 320 | if let Some(current_filesystem) = current_filesystem { 321 | if metadata.dev() != current_filesystem { 322 | warn!("'{}' is being skipped because of --one-file-system.", path.as_ref().display()); 323 | return None 324 | } 325 | } 326 | 327 | if self.should_ignore(&metadata, path.as_ref()) { 328 | warn!("'{}' is being skipped because it is ignored.", path.as_ref().display()); 329 | return None; 330 | } 331 | 332 | // Skip anything that isn't a symlink, regular file, or directory. 333 | if symlink_path.is_none() && !metadata.is_file() && !metadata.is_dir() { 334 | warn!("Skipping '{}' because it is not a symlink, directory, or regular file.", path.as_ref().display()); 335 | return None; 336 | } 337 | 338 | let filesize = if symlink_path.is_none() && metadata.is_file() { 339 | metadata.len() 340 | } else { 341 | 0 342 | }; 343 | 344 | let canonical_path = match path.as_ref().canonicalize() { 345 | Ok(canonical_path) => Some(canonical_path), 346 | Err(_) => None, 347 | }; 348 | 349 | // The path stored in the archive is relative to the archive's base_path 350 | let filepath = match path.as_ref().strip_prefix(&self.base_path) { 351 | Ok(filepath) => match filepath.to_str () { 352 | Some(filepath) => filepath.to_string(), 353 | None => { 354 | warn!("Unable to read path of '{}' as UTF-8 string. It will not be included in the archive.", path.as_ref().display()); 355 | return None 356 | } 357 | }, 358 | Err(_) => { 359 | warn!("An internal error occured involving strip_prefix. The file '{}' will not be included in the archive.", path.as_ref().display()); 360 | return None 361 | } 362 | }; 363 | 364 | // Handle hardlinks 365 | let hardlink_id = if metadata.nlink() > 1 && !metadata.is_dir() { 366 | let key = FileIdentifier { 367 | devid: metadata.dev(), 368 | inode: metadata.ino(), 369 | }; 370 | 371 | let next_hardlink_id = self.last_hardlink_id; 372 | 373 | let entry = self.hardlink_map.entry(key).or_insert_with(|| { 374 | HardLink { 375 | expected_links: metadata.nlink(), 376 | id: next_hardlink_id, 377 | example_path: PathBuf::from(path.as_ref()), 378 | } 379 | }); 380 | 381 | if entry.id == self.last_hardlink_id { 382 | self.last_hardlink_id += 1; 383 | } 384 | 385 | Some(entry.id) 386 | } else { 387 | None 388 | }; 389 | 390 | Some(ArchiveBuilderFile { 391 | file: archive::File { 392 | path: filepath, 393 | is_dir: metadata.is_dir(), 394 | symlink: symlink_path, 395 | hardlink_id: hardlink_id, 396 | mode: metadata.mode(), 397 | mtime: metadata.mtime(), 398 | mtime_nsec: metadata.mtime_nsec(), 399 | uid: metadata.uid(), 400 | gid: metadata.gid(), 401 | size: filesize, 402 | blocks: Vec::new(), 403 | }, 404 | missing: false, 405 | canonical_path: canonical_path, 406 | }) 407 | } 408 | 409 | /// Assuming that path is a directory, this function returns a list of 410 | /// all entries inside that directory. 411 | fn list_directory_children>(&mut self, path: P) -> Vec { 412 | let mut children = Vec::new(); 413 | 414 | let entries = match path.as_ref().read_dir() { 415 | Ok(entries) => entries, 416 | Err(err) => { 417 | warn!("Unable to read directory '{}'. 
The following error was received: {}", path.as_ref().display(), err); 418 | return Vec::new(); 419 | } 420 | }; 421 | 422 | for entry in entries { 423 | let entry = match entry { 424 | Ok(x) => x, 425 | Err(err) => { 426 | warn!("Unable to read contents of directory '{}'. The following error was received: {}", path.as_ref().display(), err); 427 | return Vec::new(); 428 | } 429 | }; 430 | 431 | children.push(entry.path()); 432 | } 433 | 434 | children 435 | } 436 | 437 | /// Determine if the given path should be ignored, given the settings. 438 | fn should_ignore>(&self, metadata: &fs::Metadata, path: P) -> bool { 439 | let identifier = FileIdentifier { 440 | devid: metadata.dev(), 441 | inode: metadata.ino(), 442 | }; 443 | 444 | if self.inode_ignore_list.contains(&identifier) { 445 | return true; 446 | } 447 | 448 | if metadata.is_dir() && self.path_ignore_list.contains(path.as_ref()) { 449 | return true; 450 | } 451 | 452 | false 453 | } 454 | 455 | /// Logs warnings about any hardlinks for which we haven't backed up all the links. 456 | fn warn_about_missing_hardlinks(&self) { 457 | let mut links_found = HashMap::new(); 458 | 459 | for file in &self.files { 460 | if let Some(hardlink_id) = file.file.hardlink_id { 461 | *links_found.entry(hardlink_id).or_insert(0) += 1; 462 | } 463 | } 464 | 465 | for hardlink in self.hardlink_map.values() { 466 | match links_found.get(&hardlink.id) { 467 | Some(links) => { 468 | if links < &hardlink.expected_links { 469 | warn!("A hardlink with {} links was included in this backup, but only {} of those links have been included. One of the links: '{}'", hardlink.expected_links, links, hardlink.example_path.display()); 470 | } 471 | }, 472 | None => { 473 | warn!("A hardlink with {} links was supposed to be included in this backup, but none of those links have been included. One of the links: '{}'", hardlink.expected_links, hardlink.example_path.display()); 474 | } 475 | } 476 | } 477 | } 478 | 479 | /// Logs warnings about any symlinks for which we haven't backed up the file/directory linked. 480 | fn warn_about_missing_symlinks(&self) { 481 | // Create a hashset of all archived paths, and a list of all symlinks. 482 | let mut symlinks = Vec::new(); 483 | let mut paths_archived = HashSet::new(); 484 | 485 | for file in &self.files { 486 | if file.file.symlink.is_some() { 487 | // Calling canonicalize on the symlink will get us the target file/folder 488 | let target = match file.canonical_path.clone() { 489 | Some(target) => target, 490 | None => { 491 | warn!("The symlink '{}' was included in the backup, but the file/directory it links to doesn't exist.", file.file.path); 492 | continue; 493 | } 494 | }; 495 | symlinks.push((file.file.path.clone(), target)); 496 | } else { 497 | match file.canonical_path.clone() { 498 | Some(path) => {paths_archived.insert(path); ()}, 499 | None => (), 500 | } 501 | } 502 | } 503 | 504 | // Now we can go through all symlinks and make sure the file/directory they link to exists. 
505 | for (path, symlink) in symlinks { 506 | if paths_archived.contains(&symlink) { 507 | continue; 508 | } 509 | 510 | warn!("The symlink '{}' was included in the backup, but the file/directory it links to, '{}', was not included.", path, symlink.display()); 511 | } 512 | } 513 | 514 | fn read_files(&mut self) -> Result<()> { 515 | let mut progress = 0; 516 | let cache_db = self.open_cache_db()?; 517 | 518 | for file in &mut self.files { 519 | if file.file.is_dir || file.file.symlink.is_some() { 520 | continue; 521 | } 522 | 523 | info!("Reading file: {}", file.file.path); 524 | match read_file(file, &self.base_path, &cache_db, self.keystore, self.backend, progress, self.total_size)? { 525 | Some(blocks) => file.file.blocks.extend(blocks), 526 | None => file.missing = true, 527 | }; 528 | 529 | progress += file.file.size; 530 | info!("Progress: {}MB of {}MB", progress / (1024*1024), self.total_size / (1024*1024)); 531 | } 532 | 533 | self.files.retain(|ref file| !file.missing); 534 | 535 | Ok(()) 536 | } 537 | } 538 | 539 | 540 | fn read_file>(file: &mut ArchiveBuilderFile, base_path: P, cache_db: &rusqlite::Connection, keystore: &KeyStore, backend: &mut dyn Backend, progress: u64, total_size: u64) -> Result>> { 541 | let path = base_path.as_ref().join(&file.file.path); 542 | let canonical_path = match file.canonical_path.clone() { 543 | Some(canonical_path) => canonical_path, 544 | None => { 545 | warn!("Unable to canonicalize path for '{}'. It will not be included in the archive.", path.display()); 546 | return Ok(None); 547 | } 548 | }; 549 | let canonical_path_str = match canonical_path.to_str() { 550 | Some(path) => path, 551 | None => { 552 | warn!("Unable to canonicalize path for '{}'. It is not a UTF-8 string. It will not be included in the archive.", path.display()); 553 | return Ok(None); 554 | } 555 | }; 556 | 557 | // Check to see if we have this file in the cache 558 | let result = cache_db.query_row("SELECT blocks FROM mtime_cache WHERE path=? AND mtime=? AND mtime_nsec=? AND size=?", &[&canonical_path_str.to_owned() as &dyn ToSql, &file.file.mtime, &file.file.mtime_nsec, &(file.file.size as i64)], |row| { 559 | row.get(0) 560 | }); 561 | 562 | match result { 563 | Ok(blocks_str) => { 564 | // The file is cached, but are all the blocks available in the current block store? 565 | let blocks_str: String = blocks_str; 566 | 567 | match serde_json::from_str::>(&blocks_str) { 568 | Ok(blocks) => { 569 | let mut all_blocks_exist = true; 570 | 571 | for block in &blocks { 572 | if !backend.block_exists(block)? { 573 | all_blocks_exist = false; 574 | break; 575 | } 576 | } 577 | 578 | if all_blocks_exist { 579 | debug!("Found in mtime cache."); 580 | return Ok(Some(blocks)); 581 | } 582 | }, 583 | Err(_) => { 584 | warn!("Bad block id encoding in the cache database. The cache database might be corrupted."); 585 | }, 586 | } 587 | }, 588 | Err(rusqlite::Error::QueryReturnedNoRows) => (), 589 | Err(err) => return Err(err.into()), 590 | }; 591 | 592 | // Not cached or missing blocks, so let's actually read the file 593 | let mut retries = 0; 594 | loop { 595 | // Update metadata, in case it changed. 
596 | match path.metadata() { 597 | Ok(metadata) => { 598 | file.file.mtime = metadata.mtime(); 599 | file.file.mtime_nsec = metadata.mtime_nsec(); 600 | file.file.size = metadata.size(); 601 | file.file.mode = metadata.mode(); 602 | file.file.uid = metadata.uid(); 603 | file.file.gid = metadata.gid(); 604 | }, 605 | Err(err) => { 606 | warn!("An error was received while checking the metadata for '{}'. It will not be included in the archive. Error message: '{}'.", path.display(), err); 607 | return Ok(None); 608 | } 609 | }; 610 | 611 | // Read file contents 612 | let (blocks, should_retry) = read_file_inner(&path, keystore, backend, progress, total_size, file.file.mtime, file.file.mtime_nsec, file.file.size)?; 613 | 614 | let blocks = match blocks { 615 | Some(blocks) => blocks, 616 | None => { 617 | // Reading failed. Should we retry? 618 | if !should_retry { 619 | return Ok(None) 620 | } 621 | 622 | // Reading failed due to the file changing. Let's retry. 623 | if retries == 2 { 624 | warn!("File '{}' keeps changing or causing I/O errors. It will not be included in the archive.", path.display()); 625 | return Ok(None) 626 | } 627 | 628 | warn!("File changed or we encountered an I/O error, restarting from beginning."); 629 | retries += 1; 630 | continue; 631 | }, 632 | }; 633 | 634 | let blocks_str = serde_json::to_string(&blocks).expect("internal error"); 635 | cache_db.execute("INSERT OR REPLACE INTO mtime_cache (path, mtime, mtime_nsec, size, blocks) VALUES (?,?,?,?,?)", &[&canonical_path_str.to_owned() as &dyn ToSql, &file.file.mtime, &file.file.mtime_nsec, &(file.file.size as i64), &blocks_str])?; 636 | 637 | return Ok(Some(blocks)); 638 | } 639 | } 640 | 641 | 642 | // Used by read_file. read_file checks the cache, etc. This will actually read the file into blocks. 643 | // If any file modifications are detected while reading, this function will return (None, true) to indicate the caller that it should retry (if it wishes). 644 | fn read_file_inner>(path: P, keystore: &KeyStore, backend: &mut dyn Backend, progress: u64, total_size: u64, expected_mtime: i64, expected_mtime_nsec: i64, expected_size: u64) -> Result<(Option>, bool)> { 645 | let reader_file = match fs::File::open(&path) { 646 | Ok(f) => f, 647 | Err(err) => { 648 | warn!("Unable to open file '{}'. The following error was received: {}. It will not be included in the archive.", path.as_ref().display(), err); 649 | return Ok((None, false)) 650 | }, 651 | }; 652 | let reader = BufReader::new(&reader_file); 653 | let reader_ref = reader.get_ref(); 654 | let mut buffer = Vec::::new(); 655 | let mut total_read = 0; 656 | let mut blocks = Vec::new(); 657 | 658 | loop { 659 | buffer.clear(); 660 | match reader_ref.take(1024*1024).read_to_end(&mut buffer) { 661 | Ok(_) => (), 662 | Err(err) => { 663 | // Problem reading the file. Restart. 664 | warn!("An error was encountered while reading '{}': {}", path.as_ref().display(), err); 665 | return Ok((None, true)); 666 | }, 667 | } 668 | 669 | // Check for file modification 670 | match path.as_ref().metadata() { 671 | Ok(metadata) => { 672 | if metadata.mtime() != expected_mtime || metadata.mtime_nsec() != expected_mtime_nsec { 673 | // The file has been modified. Restart. 674 | return Ok((None, true)); 675 | } 676 | }, 677 | Err(err) => { 678 | warn!("An error was received while checking the metadata for '{}'. It will not be included in the archive. 
Error message: '{}'.", path.as_ref().display(), err); 679 | return Ok((None, false)); 680 | } 681 | }; 682 | 683 | if buffer.is_empty() { 684 | break; 685 | } 686 | 687 | total_read += buffer.len(); 688 | 689 | // Encrypt and store block in backend (if it doesn't already exist) 690 | let (block_id, encrypted_block) = keystore.encrypt_block(&buffer); 691 | 692 | if !backend.block_exists(&block_id)? { 693 | // Block doesn't exist in backend; store it 694 | backend.store_block(&block_id, &encrypted_block)?; 695 | } 696 | 697 | blocks.push(block_id); 698 | 699 | if (total_read % (64*1024*1024)) == 0 { 700 | info!("Progress: {}MB of {}MB", (progress + total_read as u64) / (1024*1024), total_size / (1024*1024)); 701 | } 702 | } 703 | 704 | if total_read as u64 != expected_size { 705 | // File was modified 706 | return Ok((None, true)); 707 | } 708 | 709 | Ok((Some(blocks), false)) 710 | } 711 | -------------------------------------------------------------------------------- /src/cmds/diff.rs: -------------------------------------------------------------------------------- 1 | use clap::ArgMatches; 2 | use log::{error, warn}; 3 | use crate::keystore::KeyStore; 4 | use crate::backend::{self, Backend}; 5 | use crate::archive::{Archive, File}; 6 | use crate::error::Result; 7 | use std::collections::{HashMap, HashSet}; 8 | 9 | 10 | pub fn execute(args: &ArgMatches) { 11 | let backup1_name = args.value_of("NAME1").expect("internal error"); 12 | let backup2_name = args.value_of("NAME2").expect("internal error"); 13 | let args_keyfile = args.value_of("keyfile").expect("internal error"); 14 | let args_backend = args.value_of("backend").expect("internal error"); 15 | 16 | let keystore = match KeyStore::load_from_path(args_keyfile) { 17 | Ok(keystore) => keystore, 18 | Err(err) => { 19 | error!("Unable to load keyfile: {}", err); 20 | return; 21 | } 22 | }; 23 | 24 | let mut backend = match backend::backend_from_backend_path(args_backend) { 25 | Ok(backend) => backend, 26 | Err(err) => { 27 | error!("Unable to load backend: {}", err); 28 | return; 29 | } 30 | }; 31 | 32 | let mut archive1 = match fetch_and_decrypt_archive(backup1_name, &keystore, &mut *backend) { 33 | Ok(archive) => archive, 34 | Err(err) => { 35 | error!("{}", err); 36 | return; 37 | } 38 | }; 39 | 40 | let mut archive2 = match fetch_and_decrypt_archive(backup2_name, &keystore, &mut *backend) { 41 | Ok(archive) => archive, 42 | Err(err) => { 43 | error!("{}", err); 44 | return; 45 | } 46 | }; 47 | 48 | if archive1.version != 0x00000001 || archive2.version != 0x00000001 { 49 | error!("Unsupported archive version"); 50 | return; 51 | } 52 | 53 | if archive1.original_path != archive2.original_path { 54 | warn!("The original paths for the two archives differ. This may or may not be important depending on what you're comparing."); 55 | } 56 | 57 | // TODO: Need to handle hardlinks properly? 58 | // TODO: For now, we know that preserve stores the list of blocks for all files, even those with hardlink_ids, so we can ignore the hardlink_id field. 59 | // TODO: Basically this means that we'll still detect differences in file contents, but we can't report if hardlinks themselves have changed. 
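// Clearing hardlink_id on both archives keeps the comparison below purely
// content- and metadata-based.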
60 | for file in &mut archive1.files { 61 | file.hardlink_id = None; 62 | } 63 | 64 | for file in &mut archive2.files { 65 | file.hardlink_id = None; 66 | } 67 | 68 | let archive1_hashmap: HashMap<&String, &File> = archive1.files.iter().map(|file| (&file.path, file)).collect(); 69 | let archive2_hashmap: HashMap<&String, &File> = archive2.files.iter().map(|file| (&file.path, file)).collect(); 70 | 71 | let archive1_hashset: HashSet<&String> = archive1_hashmap.keys().cloned().collect();// archive1.files.iter().map(|file| &file.path).collect(); 72 | let archive2_hashset: HashSet<&String> = archive2_hashmap.keys().cloned().collect();// archive2.files.iter().map(|file| &file.path).collect(); 73 | 74 | // Files in archive2 that aren't in archive1. 75 | archive2_hashset.difference(&archive1_hashset) 76 | .for_each(|path| { 77 | println!("Added: {}", path) 78 | }); 79 | 80 | // Files in archive1 that aren't in archive2. 81 | archive1_hashset.difference(&archive2_hashset) 82 | .for_each(|path| { 83 | println!("Deleted: {}", path) 84 | }); 85 | 86 | // Files that are in both, but have changed. 87 | archive2_hashset.intersection(&archive1_hashset) 88 | .filter(|&path| { 89 | let version1 = archive1_hashmap.get(path).expect("internal error"); 90 | let version2 = archive2_hashmap.get(path).expect("internal error"); 91 | 92 | version1 != version2 93 | }) 94 | .for_each(|path| { 95 | println!("Changed: {}", path); 96 | }); 97 | } 98 | 99 | 100 | fn fetch_and_decrypt_archive(name: &str, keystore: &KeyStore, backend: &mut dyn Backend) -> Result { 101 | let (archive_id, _) = keystore.encrypt_archive_name(&name); 102 | let encrypted_archive = backend.fetch_archive(&archive_id)?; 103 | Archive::decrypt(&archive_id, &encrypted_archive, &keystore) 104 | } -------------------------------------------------------------------------------- /src/cmds/keygen/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::keystore::KeyStore; 2 | use std::fs::OpenOptions; 3 | use std::io::{self, BufWriter, Write}; 4 | use clap::ArgMatches; 5 | use log::error; 6 | 7 | 8 | pub fn execute(args: &ArgMatches) { 9 | // Open output file/stdout for writing 10 | let file: Box = match args.value_of("keyfile") { 11 | Some(path) => { 12 | // Won't overwrite existing file 13 | let file = match OpenOptions::new().write(true).create_new(true).open(path) { 14 | Ok(f) => f, 15 | Err(e) => if e.kind() == io::ErrorKind::AlreadyExists { 16 | error!("'{}' already exists.", path); 17 | return; 18 | } else { 19 | error!("Could not open '{}' for writing: {}", path, e); 20 | return; 21 | }, 22 | }; 23 | Box::new(file) 24 | }, 25 | None => Box::new(io::stdout()), 26 | }; 27 | let mut writer = BufWriter::new(file); 28 | 29 | // Create a new keystore 30 | let keystore = KeyStore::new(); 31 | 32 | // Save the keystore to the destination (file/stdout) 33 | match keystore.save(&mut writer) { 34 | Ok(_) => (), 35 | Err(err) => { 36 | error!("Could not write to keyfile: {}", err); 37 | return; 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/cmds/list/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::keystore::KeyStore; 2 | use crate::backend; 3 | use clap::ArgMatches; 4 | use log::{error, warn}; 5 | 6 | 7 | pub fn execute(args: &ArgMatches) { 8 | let args_keyfile = args.value_of("keyfile").expect("internal error"); 9 | let args_backend = args.value_of("backend").expect("internal error"); 10 | 11 
| let keystore = match KeyStore::load_from_path(args_keyfile) { 12 | Ok(keystore) => keystore, 13 | Err(err) => { 14 | error!("Unable to load keyfile: {}", err); 15 | return; 16 | } 17 | }; 18 | 19 | let mut backend = match backend::backend_from_backend_path(args_backend) { 20 | Ok(backend) => backend, 21 | Err(err) => { 22 | error!("Unable to load backend: {}", err); 23 | return; 24 | } 25 | }; 26 | 27 | let encrypted_archive_names = match backend.list_archives() { 28 | Ok(names) => names, 29 | Err(err) => { 30 | error!("There was a problem listing the archives: {}", err); 31 | return; 32 | } 33 | }; 34 | 35 | // TODO: Push into a vec, sort alphabetically, and then print 36 | for (archive_id, encrypted_archive_name) in &encrypted_archive_names { 37 | let archive_name = match keystore.decrypt_archive_name(archive_id, encrypted_archive_name) { 38 | Ok(name) => name, 39 | Err(err) => { 40 | warn!("Could not decrypt one of the archive names belonging to ArchiveID: {}, because: {}", archive_id.to_string(), err); 41 | continue; 42 | } 43 | }; 44 | 45 | println!("{}", archive_name); 46 | } 47 | 48 | if encrypted_archive_names.is_empty() { 49 | println!("No archives found"); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/cmds/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod create; 2 | pub mod keygen; 3 | pub mod list; 4 | pub mod restore; 5 | pub mod verify; 6 | pub mod diff; -------------------------------------------------------------------------------- /src/cmds/restore/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::keystore::{KeyStore, BlockId}; 2 | use std::fs; 3 | use std::io::{self, BufWriter, Write, Read}; 4 | use std::path::{Path, PathBuf}; 5 | use std::os::unix::fs::PermissionsExt; 6 | use std::collections::HashMap; 7 | use crate::backend::{self, Backend}; 8 | use crate::archive::{Archive, File}; 9 | use clap::ArgMatches; 10 | use crate::error::*; 11 | use log::{error, info}; 12 | 13 | 14 | struct DownloadCache { 15 | refcount: u64, 16 | downloaded: bool, 17 | id: BlockId, 18 | } 19 | 20 | 21 | pub fn execute(args: &ArgMatches) { 22 | let debug_decrypt = args.is_present("debug-decrypt"); 23 | 24 | if !debug_decrypt && !args.is_present("PATH") { 25 | error!("Missing option"); 26 | return; 27 | } 28 | 29 | let args_keyfile = args.value_of("keyfile").expect("internal error"); 30 | let args_backend = args.value_of("backend").expect("internal error"); 31 | let backup_name = args.value_of("NAME").expect("internal error"); 32 | let target_directory = match args.value_of("PATH") { 33 | Some(path) => match Path::new(path).canonicalize() { 34 | Ok(path) => path, 35 | Err(err) => { 36 | error!("Unable to find the destination path: {}", err); 37 | return; 38 | }, 39 | }, 40 | None => PathBuf::new(), 41 | }; 42 | 43 | let keystore = match KeyStore::load_from_path(args_keyfile) { 44 | Ok(keystore) => keystore, 45 | Err(err) => { 46 | error!("Unable to load keyfile: {}", err); 47 | return; 48 | } 49 | }; 50 | 51 | let mut backend = match backend::backend_from_backend_path(args_backend) { 52 | Ok(backend) => backend, 53 | Err(err) => { 54 | error!("Unable to load backend: {}", err); 55 | return; 56 | } 57 | }; 58 | 59 | let mut config = Config::default(); 60 | 61 | config.dereference_hardlinks = args.is_present("hard-dereference"); 62 | 63 | let (archive_id, _) = keystore.encrypt_archive_name(&backup_name); 64 | let encrypted_archive = 
match backend.fetch_archive(&archive_id) { 65 | Ok(archive) => archive, 66 | Err(err) => { 67 | error!("There was a problem fetching the backup: {}", err); 68 | return; 69 | }, 70 | }; 71 | 72 | if debug_decrypt { 73 | let decrypted = match keystore.decrypt_archive_metadata(&archive_id, &encrypted_archive) { 74 | Ok(archive) => archive, 75 | Err(err) => { 76 | error!("There was a problem decrypting the backup: {}", err); 77 | return; 78 | } 79 | }; 80 | io::stdout().write(&decrypted).expect("error while writing to stdout"); 81 | return; 82 | } 83 | 84 | let archive = match Archive::decrypt(&archive_id, &encrypted_archive, &keystore) { 85 | Ok(archive) => archive, 86 | Err(err) => { 87 | error!("There was a problem decrypting the backup: {}", err); 88 | return; 89 | } 90 | }; 91 | 92 | if archive.version != 0x00000001 { 93 | error!("Unsupported archive version"); 94 | return; 95 | } 96 | 97 | let download_cache_dir = match tempfile::Builder::new().prefix("preserve-").tempdir() { 98 | Ok(dir) => dir, 99 | Err(err) => { 100 | error!("There was a problem creating a temporary directory: {}", err); 101 | return; 102 | }, 103 | }; 104 | let mut download_cache = HashMap::new(); 105 | 106 | match build_block_refcounts(&archive.files, &mut download_cache) { 107 | Ok(x) => x, 108 | Err(err) => { 109 | error!("There was a problem reading the backup: {}", err); 110 | return; 111 | }, 112 | } 113 | 114 | match extract_files(&config, &archive.files, target_directory, &keystore, download_cache_dir.path(), &mut download_cache, &mut *backend) { 115 | Ok(x) => x, 116 | Err(err) => { 117 | error!("There was a problem extracting the backup: {}", err); 118 | return; 119 | }, 120 | } 121 | 122 | info!("Restore completed successfully"); 123 | } 124 | 125 | 126 | #[derive(Default)] 127 | struct Config { 128 | /// If true, hardlinks will be removed by cloning the file at all places it is referenced. 129 | /// If false, hardlinks are preserved. 130 | pub dereference_hardlinks: bool, 131 | } 132 | 133 | 134 | fn build_block_refcounts(files: &[File], download_cache: &mut HashMap) -> Result<()> { 135 | for file in files { 136 | build_block_refcounts_helper(file, download_cache)?; 137 | } 138 | 139 | Ok(()) 140 | } 141 | 142 | 143 | fn build_block_refcounts_helper(file: &File, download_cache: &mut HashMap) -> Result<()> { 144 | for block_id in &file.blocks { 145 | download_cache.entry(block_id.clone()).or_insert(DownloadCache{ 146 | refcount: 0, 147 | downloaded: false, 148 | id: block_id.clone(), 149 | }); 150 | download_cache.get_mut(block_id).expect("internal error").refcount += 1; 151 | } 152 | 153 | Ok(()) 154 | } 155 | 156 | 157 | fn extract_files>(config: &Config, files: &[File], base_path: P, keystore: &KeyStore, cache_dir: &Path, download_cache: &mut HashMap, backend: &mut dyn Backend) -> Result<()> { 158 | let mut hardlink_map: HashMap = HashMap::new(); 159 | // List of all directories and the mtimes they need set. 160 | // We set these after extracting all files, since extracting the files changes the mtime of 161 | // directories. 162 | let mut directory_times = Vec::new(); 163 | 164 | for file in files { 165 | let filepath = base_path.as_ref().join(&file.path); 166 | 167 | if let Some(ref symlink_path) = file.symlink { 168 | use std::os::unix; 169 | info!("Creating symlink: {} {}", symlink_path, filepath.display()); 170 | unix::fs::symlink(symlink_path, &filepath)?; 171 | } else if file.is_dir { 172 | info!("Creating directory: {}", filepath.display()); 173 | // Create and then set permissions. 
This is done in two steps because 174 | // mkdir is affected by the current process's umask, whereas chmod (set_permissions) is not. 175 | fs::create_dir(&filepath)?; 176 | fs::set_permissions(&filepath, fs::Permissions::from_mode(file.mode))?; 177 | directory_times.push((filepath.clone(), file.mtime, file.mtime_nsec)); 178 | } else { 179 | let hardlinked = if let Some(hardlink_id) = file.hardlink_id { 180 | if config.dereference_hardlinks { 181 | false 182 | } else { 183 | match hardlink_map.get(&hardlink_id) { 184 | Some(existing_path) => { 185 | info!("Hardlinking '{}' to '{}'", existing_path.display(), filepath.display()); 186 | fs::hard_link(existing_path, &filepath)?; 187 | true 188 | }, 189 | None => false, 190 | } 191 | } 192 | } else { 193 | false 194 | }; 195 | 196 | if !hardlinked { 197 | info!("Writing file: {}", filepath.display()); 198 | // We set permissions after creating the file because `open` uses umask. 199 | extract_file(&filepath, file, keystore, cache_dir, download_cache, backend)?; 200 | fs::set_permissions(&filepath, fs::Permissions::from_mode(file.mode))?; 201 | 202 | if !config.dereference_hardlinks { 203 | if let Some(hardlink_id) = file.hardlink_id { 204 | hardlink_map.insert(hardlink_id, filepath.clone()); 205 | } 206 | } 207 | } 208 | } 209 | 210 | set_file_time(&filepath, file.mtime, file.mtime_nsec)?; 211 | } 212 | 213 | // Set mtime for directories. 214 | // We go in reverse, so we hit child directories before their parents 215 | directory_times.reverse(); 216 | 217 | for (ref dirpath, ref mtime, ref mtime_nsec) in directory_times { 218 | set_file_time(dirpath, *mtime, *mtime_nsec)?; 219 | } 220 | 221 | Ok(()) 222 | } 223 | 224 | 225 | fn extract_file>(path: P, f: &File, keystore: &KeyStore, cache_dir: &Path, download_cache: &mut HashMap, backend: &mut dyn Backend) -> Result<()> { 226 | // Don't overwrite existing files 227 | let file = fs::OpenOptions::new().write(true).create_new(true).open(path.as_ref())?; 228 | let mut writer = BufWriter::new(&file); 229 | let mut total_written = 0; 230 | 231 | for block_id in &f.blocks { 232 | let plaintext = cache_fetch(block_id, keystore, cache_dir, download_cache, backend)?; 233 | 234 | writer.write_all(&plaintext)?; 235 | total_written += plaintext.len(); 236 | } 237 | 238 | if total_written as u64 != f.size { 239 | error!("The final extracted size of '{}' did not match what was expected: {} != {}", path.as_ref().display(), total_written, f.size); 240 | } 241 | 242 | Ok(()) 243 | } 244 | 245 | 246 | fn cache_fetch(block_id: &BlockId, keystore: &KeyStore, cache_dir: &Path, download_cache: &mut HashMap, backend: &mut dyn Backend) -> Result> { 247 | let cache = download_cache.get_mut(block_id).expect("internal error"); 248 | let path = cache_dir.join(cache.id.to_string()); 249 | 250 | if cache.downloaded { 251 | let plaintext = { 252 | let mut file = fs::File::open(path.clone())?; 253 | let mut plaintext = vec![0u8; 0]; 254 | file.read_to_end(&mut plaintext)?; 255 | plaintext 256 | }; 257 | 258 | cache.refcount -= 1; 259 | 260 | if cache.refcount == 0 { 261 | fs::remove_file(path)?; 262 | } 263 | 264 | Ok(plaintext) 265 | } else { 266 | let encrypted_block = backend.fetch_block(&cache.id)?; 267 | let plaintext = keystore.decrypt_block(&cache.id, &encrypted_block)?; 268 | 269 | cache.refcount -=1; 270 | cache.downloaded = true; 271 | 272 | if cache.refcount > 0 { 273 | let mut file = fs::File::create(path)?; 274 | file.write_all(&plaintext)?; 275 | } 276 | 277 | Ok(plaintext) 278 | } 279 | } 280 | 281 | 282 | fn 
set_file_time(path: &Path, mtime: i64, mtime_nsec: i64) -> Result<()> { 283 | use std::ffi::CString; 284 | use std::os::unix::prelude::*; 285 | use libc::{time_t, timespec, utimensat, c_long, AT_FDCWD, AT_SYMLINK_NOFOLLOW}; 286 | 287 | let times = [timespec { 288 | tv_sec: mtime as time_t, 289 | tv_nsec: mtime_nsec as c_long, 290 | }, 291 | timespec { 292 | tv_sec: mtime as time_t, 293 | tv_nsec: mtime_nsec as c_long, 294 | }]; 295 | let p = CString::new(path.as_os_str().as_bytes()).expect("internal error"); 296 | 297 | unsafe { 298 | if utimensat(AT_FDCWD, p.as_ptr() as *const _, times.as_ptr(), AT_SYMLINK_NOFOLLOW) == 0 { 299 | Ok(()) 300 | } else { 301 | Err(io::Error::last_os_error().into()) 302 | } 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /src/cmds/verify/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::keystore::{KeyStore, BlockId}; 2 | use std::collections::HashSet; 3 | use crate::backend::{self, Backend}; 4 | use crate::archive::{Archive, File}; 5 | use rand::prelude::*; 6 | use clap::ArgMatches; 7 | use log::{error, info}; 8 | 9 | 10 | pub fn execute(args: &ArgMatches) { 11 | let backup_name = args.value_of("NAME").expect("internal error"); 12 | let args_keyfile = args.value_of("keyfile").expect("internal error"); 13 | let args_backend = args.value_of("backend").expect("internal error"); 14 | 15 | let keystore = match KeyStore::load_from_path(args_keyfile) { 16 | Ok(keystore) => keystore, 17 | Err(err) => { 18 | error!("Unable to load keyfile: {}", err); 19 | return; 20 | } 21 | }; 22 | 23 | let mut backend = match backend::backend_from_backend_path(args_backend) { 24 | Ok(backend) => backend, 25 | Err(err) => { 26 | error!("Unable to load backend: {}", err); 27 | return; 28 | } 29 | }; 30 | 31 | let (archive_id, _) = keystore.encrypt_archive_name(&backup_name); 32 | let encrypted_archive = match backend.fetch_archive(&archive_id) { 33 | Ok(archive) => archive, 34 | Err(err) => { 35 | error!("{}", err); 36 | return; 37 | } 38 | }; 39 | let archive = match Archive::decrypt(&archive_id, &encrypted_archive, &keystore) { 40 | Ok(archive) => archive, 41 | Err(err) => { 42 | error!("{}", err); 43 | return; 44 | } 45 | }; 46 | 47 | if archive.version != 0x00000001 { 48 | error!("Unsupported archive version"); 49 | return; 50 | } 51 | 52 | let mut block_list = HashSet::new(); 53 | 54 | build_block_list(&archive.files, &mut block_list); 55 | let mut block_list: Vec = block_list.into_iter().collect(); 56 | // We shuffle so that if verification is terminated it can be run again (multiple times) and 57 | // probablistically cover all blocks. 58 | block_list.shuffle(&mut rand::thread_rng()); 59 | 60 | verify_blocks(&block_list, &keystore, &mut *backend); 61 | } 62 | 63 | 64 | fn build_block_list(files: &[File], block_list: &mut HashSet) { 65 | for file in files { 66 | for block_id in &file.blocks { 67 | block_list.insert(block_id.clone()); 68 | } 69 | } 70 | } 71 | 72 | 73 | fn verify_blocks(block_list: &[BlockId], keystore: &KeyStore, backend: &mut dyn Backend) { 74 | let mut corrupted_blocks = Vec::new(); 75 | 76 | for (idx, block_id) in block_list.iter().enumerate() { 77 | // TODO: Differentiate between a missing block and an error. Missing blocks would be critical errors. 
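// A block's id is its SIV, so decrypt_block below re-authenticates the
// fetched data against block_id and catches any corruption in storage.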
78 | let encrypted_block = match backend.fetch_block(&block_id) { 79 | Ok(block) => block, 80 | Err(err) => { 81 | error!("A problem occurred while fetching the block '{}': {}", block_id.to_string(), err); 82 | continue; 83 | } 84 | }; 85 | 86 | if keystore.decrypt_block(&block_id, &encrypted_block).is_err() { 87 | error!("CRITICAL ERROR: Block {} is corrupt. You should save a copy of the corrupted block, delete it, and then rearchive the files that created this archive. That should recreate the block.", block_id.to_string()); 88 | corrupted_blocks.push(block_id.to_string()); 89 | } 90 | 91 | if idx % 32 == 0 { 92 | info!("{:.2}% ({}/{})", 100.0 * (idx + 1) as f64 / block_list.len() as f64, idx + 1, block_list.len()); 93 | } 94 | } 95 | 96 | if !corrupted_blocks.is_empty() { 97 | error!("The following corrupted blocks were found:"); 98 | for block_id in corrupted_blocks { 99 | error!("{}", block_id); 100 | } 101 | } else { 102 | info!("No corrupted blocks were found"); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error as StdError; 2 | use std::io::Error as IoError; 3 | use rusqlite::Error as SqliteError; 4 | use std::fmt; 5 | use self::Error::*; 6 | 7 | pub type Result<T> = ::std::result::Result<T, Error>; 8 | 9 | 10 | #[derive(Debug)] 11 | pub enum Error { 12 | /// I/O error 13 | Io(IoError), 14 | /// Error from JSON encoder/decoder 15 | Json(serde_json::Error), 16 | /// Bad Backend Path 17 | BadBackendPath(String), 18 | CorruptArchiveName, 19 | CorruptArchiveFailedDecompression, 20 | CorruptArchiveBadJson, 21 | CorruptBlock, 22 | CorruptKeystore, 23 | CorruptArchiveMetadata, 24 | ArchiveNameConflict, 25 | BlockNotFound, 26 | ArchiveNotFound, 27 | InvalidArchiveName, 28 | InvalidArchiveId, 29 | BackendOnDifferentDevices, 30 | Sqlite(SqliteError), 31 | } 32 | 33 | impl fmt::Display for Error { 34 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 35 | f.write_str(self.description()) 36 | } 37 | } 38 | 39 | impl StdError for Error { 40 | fn description(&self) -> &str { 41 | match *self { 42 | Io(ref e) => e.description(), 43 | Json(_) => "Invalid JSON", 44 | BadBackendPath(ref e) => e, 45 | CorruptArchiveName => "The encrypted archive name is corrupted", 46 | CorruptArchiveFailedDecompression => "The encrypted archive is corrupt: could not be decompressed", 47 | CorruptArchiveBadJson => "The encrypted archive is corrupt: the internal JSON data is invalid", 48 | CorruptBlock => "The encrypted block is corrupted", 49 | CorruptKeystore => "The keystore is corrupted", 50 | CorruptArchiveMetadata => "The archive metadata is corrupted", 51 | BlockNotFound => "The specified block was not found", 52 | ArchiveNotFound => "The specified archive was not found", 53 | InvalidArchiveName => "An invalid archive name was encountered. Possibly a stray file.", 54 | InvalidArchiveId => "An invalid archive id was encountered.
Possibly a stray file.", 55 | ArchiveNameConflict => "An archive with that name already exists", 56 | BackendOnDifferentDevices => "All folders in the backend must be on the same drive", 57 | Sqlite(ref e) => e.description(), 58 | } 59 | } 60 | 61 | fn cause(&self) -> Option<&dyn StdError> { 62 | match *self { 63 | Io(ref error) => Some(error), 64 | Json(ref error) => Some(error), 65 | BadBackendPath(_) => None, 66 | CorruptArchiveName => None, 67 | CorruptArchiveFailedDecompression => None, 68 | CorruptArchiveBadJson => None, 69 | CorruptBlock => None, 70 | CorruptKeystore => None, 71 | CorruptArchiveMetadata => None, 72 | ArchiveNameConflict => None, 73 | BlockNotFound => None, 74 | InvalidArchiveName => None, 75 | InvalidArchiveId => None, 76 | ArchiveNotFound => None, 77 | BackendOnDifferentDevices => None, 78 | Sqlite(ref error) => Some(error), 79 | } 80 | } 81 | } 82 | 83 | impl From<IoError> for Error { 84 | fn from(err: IoError) -> Error { 85 | Io(err) 86 | } 87 | } 88 | 89 | impl From<serde_json::Error> for Error { 90 | fn from(err: serde_json::Error) -> Error { 91 | Json(err) 92 | } 93 | } 94 | 95 | impl From<SqliteError> for Error { 96 | fn from(err: SqliteError) -> Error { 97 | Sqlite(err) 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/keystore.rs: -------------------------------------------------------------------------------- 1 | use std::io::{self, BufReader}; 2 | use crypto::pbkdf2::pbkdf2; 3 | use crypto::chacha20::ChaCha20; 4 | use crypto::hmac::Hmac; 5 | use crypto::sha2::Sha512; 6 | use crypto::mac::Mac; 7 | use crypto::symmetriccipher::SynchronousStreamCipher; 8 | use std::str::FromStr; 9 | use crate::error::*; 10 | use std::path::Path; 11 | use std::fs; 12 | use std::convert::TryFrom; 13 | use data_encoding::HEXLOWER_PERMISSIVE; 14 | 15 | 16 | // We liberally use newtypes to help prevent accidentally mixing up data and to make it more explicit what kind of data 17 | // functions accept and return. For example, you don't want to decrypt block data as if it were archive data.
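// As a quick illustrative sketch (the call below is hypothetical, not code from this
// module): with these newtypes, handing an ArchiveId to a function that expects a BlockId,
// e.g. `keystore.decrypt_block(&archive_id, &encrypted_block)`, is a compile-time type
// error rather than a silent key-confusion bug, even though both IDs wrap 32 raw bytes.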
18 | new_type!{ secret HmacKey(128); } 19 | new_type!{ public BlockId(32); } 20 | new_type!{ public ArchiveId(32); } 21 | new_type!{ public SIV(32); } 22 | 23 | impl ToString for BlockId { 24 | fn to_string(&self) -> String { 25 | HEXLOWER_PERMISSIVE.encode(&self.0) 26 | } 27 | } 28 | 29 | impl ToString for ArchiveId { 30 | fn to_string(&self) -> String { 31 | HEXLOWER_PERMISSIVE.encode(&self.0) 32 | } 33 | } 34 | 35 | impl FromStr for ArchiveId { 36 | type Err = Error; 37 | 38 | fn from_str(s: &str) -> ::std::result::Result<ArchiveId, Self::Err> { 39 | let v = HEXLOWER_PERMISSIVE.decode(s.as_bytes()).map_err(|_| Error::InvalidArchiveId)?; 40 | 41 | ArchiveId::from_slice(&v).ok_or(Error::InvalidArchiveId) 42 | } 43 | } 44 | 45 | pub struct EncryptedArchiveName(pub Vec<u8>); 46 | pub struct EncryptedBlock(pub Vec<u8>); 47 | pub struct EncryptedArchiveMetadata(pub Vec<u8>); 48 | 49 | 50 | #[derive(PartialEq, Clone)] 51 | struct SivEncryptionKeys { 52 | /// Used to calculate the siv for plaintext 53 | siv_key: HmacKey, 54 | /// The cipher key 55 | cipher_key: HmacKey, 56 | } 57 | 58 | impl SivEncryptionKeys { 59 | fn encrypt(&self, aad: &[u8], plaintext: &[u8]) -> (SIV, Vec<u8>) { 60 | let siv = self.calculate_siv(aad, plaintext); 61 | let ciphertext = self.cipher(&siv, plaintext); 62 | 63 | (siv, ciphertext) 64 | } 65 | 66 | fn decrypt(&self, aad: &[u8], siv: &SIV, ciphertext: &[u8]) -> Option<Vec<u8>> { 67 | let plaintext = self.cipher(siv, ciphertext); 68 | let expected_siv = self.calculate_siv(aad, &plaintext); 69 | 70 | if !siv.constant_eq(&expected_siv) { 71 | return None; 72 | } 73 | 74 | Some(plaintext) 75 | } 76 | 77 | // TODO: This method should be private 78 | /// Encrypts or decrypts data using the combination of self.cipher_key and nonce. 79 | /// First derives an encryption key using HMAC-SHA-512 (cipher_key, nonce) 80 | /// and then performs ChaCha20 (derived_key, data). 81 | fn cipher(&self, nonce: &SIV, data: &[u8]) -> Vec<u8> { 82 | let big_key = { 83 | let mut hmac = Hmac::new(Sha512::new(), &self.cipher_key[..]); 84 | hmac.input(&nonce[..]); 85 | hmac.result() 86 | }; 87 | let (chacha_key, chacha_nonce) = big_key.code().split_at(32); 88 | 89 | // Using slice notation here so this code panics in case we accidentally didn't derive the right size big_key 90 | let mut encryptor = ChaCha20::new(&chacha_key[..32], &chacha_nonce[..8]); 91 | let mut output = vec!(0u8; data.len()); 92 | encryptor.process(data, &mut output); 93 | output 94 | } 95 | 96 | // TODO: This method should be private 97 | /// Calculate the unique SIV for the combination of self.siv_key, aad, and plaintext.
98 | /// Equivalent to: HMAC-SHA-512-256 (siv_key, aad || plaintext || le64(aad.length) || le64(plaintext.length)) 99 | fn calculate_siv(&self, aad: &[u8], plaintext: &[u8]) -> SIV { 100 | let mut hmac = Hmac::new(Sha512::new(), &self.siv_key[..]); 101 | hmac.input(aad); 102 | hmac.input(plaintext); 103 | hmac.input(&u64::try_from(aad.len()).expect("calculate_siv: length did not fit into u64").to_le_bytes()); 104 | hmac.input(&u64::try_from(plaintext.len()).expect("calculate_siv: length did not fit into u64").to_le_bytes()); 105 | 106 | // Truncated to 256-bits 107 | SIV::from_slice(&hmac.result().code()[..32]).expect("internal error") 108 | } 109 | 110 | fn from_slice(bs: &[u8]) -> Option<SivEncryptionKeys> { 111 | if bs.len() != 256 { 112 | return None; 113 | } 114 | 115 | let (siv_key, cipher_key) = bs.split_at(128); 116 | 117 | Some(SivEncryptionKeys { 118 | siv_key: HmacKey::from_slice(siv_key)?, 119 | cipher_key: HmacKey::from_slice(cipher_key)?, 120 | }) 121 | } 122 | } 123 | 124 | 125 | #[derive(PartialEq)] 126 | pub struct KeyStore { 127 | /// The key all other keys are derived from. This is the only value that needs to be saved and loaded. 128 | master_key: HmacKey, 129 | 130 | block_keys: SivEncryptionKeys, 131 | archive_name_keys: SivEncryptionKeys, 132 | blocklist_keys: SivEncryptionKeys, 133 | metadata_keys: SivEncryptionKeys, 134 | } 135 | 136 | impl KeyStore { 137 | /// Create a new, random, KeyStore 138 | pub fn new() -> KeyStore { 139 | let master_key = HmacKey::from_rng(); 140 | 141 | KeyStore::from_master_key(master_key) 142 | } 143 | 144 | /// Derive the KeyStore from master_key. 145 | /// This is done using PBKDF2-HMAC-SHA512 (password=master_key, salt=[], iterations=1) 146 | /// to derive all the other keys in the KeyStore. 147 | pub fn from_master_key(master_key: HmacKey) -> KeyStore { 148 | let raw_keys = { 149 | let mut raw_keys = vec![0u8; 4 * 256]; 150 | let mut hmac = Hmac::new(Sha512::new(), &master_key[..]); 151 | pbkdf2(&mut hmac, &[], 1, &mut raw_keys); 152 | raw_keys 153 | }; 154 | 155 | let (block_keys, raw_keys) = raw_keys.split_at(256); 156 | let (archive_name_keys, raw_keys) = raw_keys.split_at(256); 157 | let (blocklist_keys, raw_keys) = raw_keys.split_at(256); 158 | let (metadata_keys, _) = raw_keys.split_at(256); 159 | 160 | KeyStore { 161 | master_key, 162 | 163 | block_keys: SivEncryptionKeys::from_slice(block_keys).expect("internal error"), 164 | archive_name_keys: SivEncryptionKeys::from_slice(archive_name_keys).expect("internal error"), 165 | blocklist_keys: SivEncryptionKeys::from_slice(blocklist_keys).expect("internal error"), 166 | metadata_keys: SivEncryptionKeys::from_slice(metadata_keys).expect("internal error"), 167 | } 168 | } 169 | 170 | /// Save this KeyStore to writer. This writes a hex encoded 1024-bit master key. 171 | pub fn save<W: io::Write>(&self, mut writer: W) -> Result<()> { 172 | Ok(writer.write_all(HEXLOWER_PERMISSIVE.encode(&self.master_key[..]).as_bytes())?) 173 | } 174 | 175 | /// Load KeyStore from reader. Expects a hex encoded 1024-bit master key, from which the KeyStore is derived.
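/// (Since the master key is an HmacKey of 128 bytes, a keyfile is a single run of 256
/// lowercase hex characters; `save` above writes exactly that format, and `load` below
/// reads back exactly 256 bytes.)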
176 | pub fn load<R: io::Read>(mut reader: R) -> Result<KeyStore> { 177 | let mut hexbytes = [0u8; 256]; 178 | 179 | reader.read_exact(&mut hexbytes)?; 180 | 181 | let slice = HEXLOWER_PERMISSIVE.decode(&hexbytes).map_err(|_| Error::CorruptKeystore)?; 182 | let master_key = HmacKey::from_slice(&slice).ok_or(Error::CorruptKeystore)?; 183 | 184 | Ok(KeyStore::from_master_key(master_key)) 185 | } 186 | 187 | pub fn load_from_path<P: AsRef<Path>>(path: P) -> Result<KeyStore> { 188 | let file = fs::File::open(path)?; 189 | let mut reader = BufReader::new(file); 190 | 191 | KeyStore::load(&mut reader) 192 | } 193 | 194 | pub fn encrypt_block(&self, block: &[u8]) -> (BlockId, EncryptedBlock) { 195 | let (id, ciphertext) = self.block_keys.encrypt(&[], block); 196 | 197 | (BlockId(id.0), EncryptedBlock(ciphertext)) 198 | } 199 | 200 | pub fn decrypt_block(&self, block_id: &BlockId, encrypted_block: &EncryptedBlock) -> Result<Vec<u8>> { 201 | self.block_keys.decrypt(&[], &SIV(block_id.clone().0), &encrypted_block.0).ok_or(Error::CorruptBlock) 202 | } 203 | 204 | pub fn encrypt_archive_name(&self, name: &str) -> (ArchiveId, EncryptedArchiveName) { 205 | let (id, ciphertext) = self.archive_name_keys.encrypt(&[], name.as_bytes()); 206 | 207 | (ArchiveId(id.0), EncryptedArchiveName(ciphertext)) 208 | } 209 | 210 | pub fn decrypt_archive_name(&self, archive_id: &ArchiveId, encrypted_name: &EncryptedArchiveName) -> Result<String> { 211 | let plaintext = self.archive_name_keys.decrypt(&[], &SIV(archive_id.clone().0), &encrypted_name.0).ok_or(Error::CorruptArchiveName)?; 212 | 213 | String::from_utf8(plaintext).map_err(|_| Error::CorruptArchiveName) 214 | } 215 | 216 | pub fn encrypt_archive_metadata(&self, archive_id: &ArchiveId, metadata: &[u8]) -> EncryptedArchiveMetadata { 217 | let (metadata_siv, encrypted_metadata) = self.metadata_keys.encrypt(&archive_id[..], metadata); 218 | let mut result = Vec::new(); 219 | 220 | result.extend_from_slice(&metadata_siv[..]); 221 | result.extend_from_slice(&encrypted_metadata); 222 | 223 | EncryptedArchiveMetadata(result) 224 | } 225 | 226 | pub fn decrypt_archive_metadata(&self, archive_id: &ArchiveId, encrypted_metadata: &EncryptedArchiveMetadata) -> Result<Vec<u8>> { 227 | if encrypted_metadata.0.len() < 32 { 228 | return Err(Error::CorruptArchiveMetadata); 229 | } 230 | 231 | let (siv, ciphertext) = encrypted_metadata.0.split_at(32); 232 | 233 | let plaintext = self.metadata_keys.decrypt(&archive_id[..], &SIV::from_slice(siv).expect("internal error"), ciphertext).ok_or(Error::CorruptArchiveMetadata)?; 234 | 235 | Ok(plaintext) 236 | } 237 | } 238 | 239 | 240 | 241 | #[cfg(test)] 242 | mod test { 243 | use super::{HmacKey, SivEncryptionKeys, KeyStore, SIV}; 244 | use crypto::pbkdf2::pbkdf2; 245 | use crypto::hmac::Hmac; 246 | use crypto::sha2::Sha512; 247 | use data_encoding::HEXLOWER_PERMISSIVE; 248 | use rand::rngs::OsRng; 249 | use rand::Rng; 250 | use rand::seq::SliceRandom; 251 | 252 | // TODO: As a sanity check, we should perform some statistical tests on the outputs from all the encryption functions. 253 | // If they are implemented correctly, all output should look indistinguishable from random. 254 | 255 | fn from_hexstr(hexstr: &str) -> Vec<u8> { 256 | HEXLOWER_PERMISSIVE.decode(hexstr.as_bytes()).unwrap() 257 | } 258 | 259 | // PBKDF2 output should be extendable (i.e.
we can add keys to the KeyStore later by increasing the length passed to PBKDF2) 260 | #[test] 261 | fn test_pbkdf2_extendable() { 262 | let key = HmacKey::from_rng(); 263 | 264 | let out1 = { 265 | let mut output = vec![0u8; 100]; 266 | let mut hmac = Hmac::new(Sha512::new(), &key[..]); 267 | pbkdf2(&mut hmac, &[], 1, &mut output); 268 | output 269 | }; 270 | 271 | let out2 = { 272 | let mut output = vec![0u8; 200]; 273 | let mut hmac = Hmac::new(Sha512::new(), &key[..]); 274 | pbkdf2(&mut hmac, &[], 1, &mut output); 275 | output.truncate(100); 276 | output 277 | }; 278 | 279 | assert_eq!(out1, out2); 280 | } 281 | 282 | // Exercises the encryption system 283 | #[test] 284 | fn test_encryption() { 285 | let keys = SivEncryptionKeys { 286 | siv_key: HmacKey::from_rng(), 287 | cipher_key: HmacKey::from_rng(), 288 | }; 289 | 290 | let other_keys = SivEncryptionKeys { 291 | siv_key: HmacKey::from_rng(), 292 | cipher_key: HmacKey::from_rng(), 293 | }; 294 | 295 | let mut plaintext = vec![0u8; OsRng.gen_range(16, 1024)]; 296 | let mut aad = vec![0u8; OsRng.gen_range(0, 1024)]; 297 | OsRng.fill(&mut plaintext[..]); 298 | OsRng.fill(&mut aad[..]); 299 | 300 | // The same aad and plaintext should result in the same siv and ciphertext 301 | let (siv1, ciphertext1) = keys.encrypt(&aad, &plaintext); 302 | let (siv2, ciphertext2) = keys.encrypt(&aad, &plaintext); 303 | assert_eq!(siv1, siv2); 304 | assert_eq!(ciphertext1, ciphertext2); 305 | 306 | // But not if the key changes (NOTE: Random chance could result in ciphertexts being equal, but the likelihood is impossibly small for our test case (which has a minimum 16-byte plaintext)) 307 | let (other_siv, other_ciphertext) = other_keys.encrypt(&aad, &plaintext); 308 | assert_ne!(other_siv, siv1); 309 | assert_ne!(other_ciphertext, ciphertext1); 310 | 311 | // Changing aad or plaintext should change siv and ciphertext 312 | let (siv3, ciphertext3) = keys.encrypt(b"different inputs", &plaintext); 313 | let (siv4, ciphertext4) = keys.encrypt(&aad, b"different inputs"); 314 | assert_ne!(siv1, siv3); 315 | assert_ne!(ciphertext1, ciphertext3); 316 | assert_ne!(siv1, siv4); 317 | assert_ne!(ciphertext1, ciphertext4); 318 | assert_ne!(siv3, siv4); 319 | assert_ne!(ciphertext3, ciphertext4); 320 | 321 | // Ciphertext should be completely different even if only one byte of plaintext is different.
322 | let mut mutated_plaintext = plaintext.clone(); 323 | *mutated_plaintext.choose_mut(&mut OsRng).unwrap() ^= 0xa; 324 | let (siv5, ciphertext5) = keys.encrypt(&aad, &plaintext[..plaintext.len() - 1]); 325 | let (siv6, ciphertext6) = keys.encrypt(&aad, &mutated_plaintext); 326 | assert_ne!(siv1, siv5); 327 | assert_ne!(&ciphertext1[..plaintext.len() - 1], &ciphertext5[..]); 328 | assert_ne!(siv1, siv6); 329 | assert_ne!(&ciphertext1, &ciphertext6); 330 | 331 | // Length preserving 332 | assert_eq!(plaintext.len(), ciphertext1.len()); 333 | 334 | // Can be decrypted 335 | assert_eq!(keys.decrypt(&aad, &siv1, &ciphertext1).unwrap(), plaintext); 336 | 337 | // Using the wrong key, siv, aad, or ciphertext should cause decryption errors 338 | assert!(keys.decrypt(&aad, &siv3, &ciphertext1).is_none()); 339 | assert!(keys.decrypt(&aad, &siv1, &ciphertext3).is_none()); 340 | assert!(keys.decrypt(b"this is not the aad you are looking for", &siv1, &ciphertext1).is_none()); 341 | assert!(keys.decrypt(&aad, &siv1, &ciphertext1[..ciphertext1.len()-1]).is_none()); 342 | assert!(other_keys.decrypt(&aad, &siv1, &ciphertext1).is_none()); 343 | } 344 | 345 | #[test] 346 | fn test_known_encryption_vectors() { 347 | let test_keys = SivEncryptionKeys { 348 | siv_key: HmacKey::from_slice(&from_hexstr("2ceaccb6b306992f6affd27049b62d823a90f8125a808d292e27f5f82bf7629b8f9ada4a8135ed99cf5d5aef0ca6a69fe54104a8246e7e5a6bb210d0c945559834d3d12b40bd61cf75a462aad1a0d71d0d963957fb8270e83902f48bfd7b8e8f0603c503238c3b24c8f4ab645c521732f31bd0b3d455448f33d56102476ee5c3")).unwrap(), 349 | cipher_key: HmacKey::from_slice(&from_hexstr("8d45ccdc385e71c9ab0619d212fcc5118fb44c7d8b37d5dc0db4214b9787905913bdd73e3afe1db5fbea82263d3171c17d2acdf88517e6d78cdb5339f10f50ef68a55950aca578c7a170476da81a705abdf031e74bf6fbf65180e51ee14983c7d100f377cea3a27caca46fd2e2bb2cca48afd5f49cf18fbe43d580e0465b308a")).unwrap(), 350 | }; 351 | let test_aad = b"aad"; 352 | let test_plaintext = b"plaintext"; 353 | let test_siv = SIV::from_slice(&from_hexstr("805165cad67979f70e16de978a34693972856db82c390b5bc824fc197a68d5d5")).unwrap(); 354 | let test_ciphertext = from_hexstr("c7a4a22690419ee831"); 355 | 356 | let (siv, ciphertext) = test_keys.encrypt(test_aad, test_plaintext); 357 | assert_eq!(siv, test_siv); 358 | assert_eq!(ciphertext, test_ciphertext); 359 | 360 | // This test vector was generated using an independent Python implementation 361 | let test_keys = SivEncryptionKeys { 362 | siv_key: HmacKey::from_slice(&from_hexstr("bf2bb483cb12aa8fb38370c3f1debfbe6f357ab0b4f0468107e95fa744f8f8419ad3a24dc2789e815ddd4a91852c96b79c6a79da6fd0b90a80359f1f91630a66389788d704e011870c04211527c7175f8dfa560779113ebe2f2486bde5d1cef883d9ad5b80f2e0530782c2d287107023f7b5834f98a370bb3310b39d58376d28")).unwrap(), 363 | cipher_key: HmacKey::from_slice(&from_hexstr("0b4d46a0f976497075238d681c7738c128eaeed7394eb700af0a00f7a452193cad43d2fa99360da728f42d1ddd45a4bc8c14ffe0eb4a40e33bf9180c5bb1201ef25615b55dd8b109f6a9f019157460aeae57bc2dd1ab6b0676386cbfd30d60ce96413dee81a339fc7d537f9a5c21bcf9836e9e40c68edaaf6a0fb18a0f7a1338")).unwrap(), 364 | }; 365 | let test_aad = b"archive id"; 366 | let test_plaintext = b"deterministic authenticated encryption"; 367 | let test_siv = SIV::from_slice(&from_hexstr("1f5453bee0dee9b19cecc680249d3410d275801109f8780204d698fba56fb33c")).unwrap(); 368 | let test_ciphertext = from_hexstr("5f0271a16eb3f842cd268078a34bca95b7b35a57b260edb6870a058c37461efb373a02d419e8"); 369 | 370 | let (siv, ciphertext) = test_keys.encrypt(test_aad, test_plaintext); 371 | 
assert_eq!(siv, test_siv); 372 | assert_eq!(ciphertext, test_ciphertext); 373 | } 374 | 375 | #[test] 376 | fn test_keystore() { 377 | // Test vector generated manually using Python: hexlify(hashlib.pbkdf2_hmac('sha512', master_key, b'', 1, dklen=256*4)) 378 | let master_key = HmacKey::from_slice(&from_hexstr("46efca626234765806a7079a8f51f6d172fd2912106eee2f6a826c8869286684eb27d026c5368827424be8ae915987f820af7ac9a3e670cfd16b3e8e611cb1a9cea329489f2049472b4bd924872526d012336356aa949833a279c469720e617f2e9096803a27b674e71265c417eff499b40d86da9aceb17be46d8f470d2a11db")).unwrap(); 379 | let keystore = KeyStore::from_master_key(master_key.clone()); 380 | 381 | let keystore_data = [ 382 | &keystore.block_keys.siv_key[..], &keystore.block_keys.cipher_key[..], 383 | &keystore.archive_name_keys.siv_key[..], &keystore.archive_name_keys.cipher_key[..], 384 | &keystore.blocklist_keys.siv_key[..], &keystore.blocklist_keys.cipher_key[..], 385 | &keystore.metadata_keys.siv_key[..], &keystore.metadata_keys.cipher_key[..], 386 | ].concat(); 387 | 388 | assert_eq!(keystore_data, from_hexstr("054c9173d52fb8b6fd4bd001230f934ba922ee2a72931a1bf3b82e2852b5ba3ac39fdd5c49173dc345fc42d551025aa41a537dbb9ccfbcd1ac596bdb47f8e61a1e98fe4767984ddc43622e5f3c4ffd6219328bea11ec9b59b913297f8f23991fce948448202fe46923cfd5e08abe293c0f4b3080d588e84c53197b3ba8a129e77bb1a0d5edddb15563c2d41d3e90e8a5857242f17364a70e7bbf73ca717b0930288e966dc3b84dee3e4beeb89fedd92bbbc03c7a26a822eca2fe0dda425adea887bef8f968c2584e8e234583db00eed0f768db9b56bbf1def531a67e3f22f0658024a508d5bae8a04b40163ca4e5ced838987f95d9bd9f4bae2f36d77b3f4e9d254f98b6286e3a1ee1324fb996aeeec95ba4dd4aa658a93bea87ec2ba766cab922322ddb529c03db2fb6ac19d515f11331faaff3c4d26888e98bc84e165dabe842528372a60f4c3ea46bbdc47a255d21728d066d3965bb618407b57aa3f155500a0eccf2e632b0af30d54012464fcde6fe96e5e4f1931ff28bd55bf29a0c5bc21ab566b7a05d9282f9fcc91d49465404384b0512dc03ae6cd7044e366b4e4dec4e9ed869382cb3cc6db2700b9c5c0965e3847b3b045b8cfb2e0209318bd4ba29d97afbfdd738c93cf78477e0d274bae95f64187dd4f9752b959ae7dacadc7eb257661d125d1cc4a08d0243d105c7f7e2f87d63340da0ff106b759b52bc608b99a57df18e143f78d85f1e1b7340d49fee84920ee1275b85a00dce55bbed81d0db883c710ee5a9d232ae8bd1793ed33223f5b3aba8610d005b11c9d1fd6aa0148f67468d4f51c2c889fb26d66c9cbd57072bfbfa5649f759e1d13ee5397babb50674598dd51ad9e29f2684c57ec6642efb11ea67a8cc48617d696203c300bb3fae17ac4036208b7876f1e59da4126229a52103cd1995a95da4ab96d4e68ab6d62e1f15d65a71c9f54a605d03be5902ebbef49c68c190ad5948d0fbfedae17e376613ee28ada120a346c5dd70e8f762bb48cddaa006a93b041b71b1bd5e6b9c6b24558047e719a11d6293a876a149c9667642c9f311c1a4779432af7d7f39f90998dd3f3c87e73dc976cc06d825c58168711825729e91c4608b492482585085d1c9d8669fc1dd4157297d290c560ebd136aadc18c6e5f48df8b125b235586dc36fa9330fc773ae00e33fa6491cf71bc0e323c1f578e40a399b3e9a3d48b6bcb0cb098e8e8783496991d5d887be527fcdfa56fe3c27ff2c0eadaaeb5706eee881b633618dfc8468d0d9a5f131ff3a976b2cbb817978eb62caf07cf6edaa879aea79fcb9f451ab06fb2b4f40c51375d27a2dff25c3ea4afab2e2ed7b03f3c64a223e2d3deec7023ee43300b9648b12732004dc34b5b21ba087b21efcb7e0c4af8a4fb5c2a3f47a9c7e40e461d63d4d4961bc576fa35cc3a4f09a19b109bbbcaf07468")); 389 | 390 | // Test serialization 391 | let mut buffer = Vec::new(); 392 | 393 | keystore.save(&mut buffer).unwrap(); 394 | let restored_keystore = KeyStore::load(&buffer[..]).unwrap(); 395 | 396 | assert!(restored_keystore == keystore); 397 | assert_eq!(restored_keystore.master_key, master_key); 398 | } 399 | 400 | // Tests the higher level APIs (encrypt block, encrypt archive, etc) 401 | // Mostly just 
sanity checks, since other tests verify that the underlying encryption functions are correct. 402 | #[test] 403 | fn test_encrypt_objects() { 404 | let keystore = KeyStore::new(); 405 | let test_data = "just plain old data"; 406 | 407 | let (block_id, mut block_ciphertext) = keystore.encrypt_block(test_data.as_bytes()); 408 | let (archive_id, name_ciphertext) = keystore.encrypt_archive_name(test_data); 409 | let metadata_ciphertext = keystore.encrypt_archive_metadata(&archive_id, test_data.as_bytes()); 410 | 411 | // Decryption should work 412 | assert_eq!(test_data.as_bytes(), &keystore.decrypt_block(&block_id, &block_ciphertext).unwrap()[..]); 413 | assert_eq!(test_data, keystore.decrypt_archive_name(&archive_id, &name_ciphertext).unwrap()); 414 | assert_eq!(test_data.as_bytes(), &keystore.decrypt_archive_metadata(&archive_id, &metadata_ciphertext).unwrap()[..]); 415 | 416 | // Even when the data is the same, every type of object should get different IDs because different keys are used 417 | assert_ne!(&block_id[..], &archive_id[..]); 418 | assert_ne!(block_ciphertext.0, name_ciphertext.0); 419 | 420 | // Decryption should fail if ciphertext is modified 421 | block_ciphertext.0[0] ^= 0xbe; 422 | assert!(keystore.decrypt_block(&block_id, &block_ciphertext).is_err()); 423 | 424 | // Make sure encrypting unicode names works 425 | let unicode_name = "(╯°□°)╯︵ ┻━┻"; 426 | let (archive_id, name_ciphertext) = keystore.encrypt_archive_name(unicode_name); 427 | assert_eq!(unicode_name, keystore.decrypt_archive_name(&archive_id, &name_ciphertext).unwrap()); 428 | } 429 | 430 | // Tests to make sure the underlying Encode function is working correctly 431 | #[test] 432 | fn test_encode() { 433 | let keystore = KeyStore::new(); 434 | 435 | let test1_a = b"a"; 436 | let test1_b = b"ab"; 437 | 438 | let test2_a = b"aa"; 439 | let test2_b = b"b"; 440 | 441 | assert_ne!(keystore.block_keys.encrypt(test1_a, test1_b).0, keystore.block_keys.encrypt(test2_a, test2_b).0); 442 | } 443 | 444 | // This test makes sure that the encryption system is using the right keys for handling different types of objects. 445 | // For example, blocks should be encrypted using the block keys, not the archive name keys. 446 | #[test] 447 | fn test_object_encryption_keys_unique() { 448 | let keystore = KeyStore::new(); 449 | let test_data = "just plain old data"; 450 | 451 | let (block_id, block_ciphertext) = keystore.encrypt_block(test_data.as_bytes()); 452 | let (archive_id, name_ciphertext) = keystore.encrypt_archive_name(test_data); 453 | let metadata_ciphertext = keystore.encrypt_archive_metadata(&archive_id, test_data.as_bytes()); 454 | 455 | // Now try to decrypt, but corrupt all the other keys that shouldn't be used. If the system is using the right key, that decryption should still be successful. 
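// ("Corrupting" is simulated below by building a brand-new random KeyStore with
// KeyStore::new() and then copying in only the single key family under test.)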
456 | let mut modified_keystore = KeyStore::new(); 457 | modified_keystore.block_keys = keystore.block_keys.clone(); 458 | assert_eq!(test_data.as_bytes(), &modified_keystore.decrypt_block(&block_id, &block_ciphertext).unwrap()[..]); 459 | 460 | let mut modified_keystore = KeyStore::new(); 461 | modified_keystore.archive_name_keys = keystore.archive_name_keys.clone(); 462 | assert_eq!(test_data, modified_keystore.decrypt_archive_name(&archive_id, &name_ciphertext).unwrap()); 463 | 464 | let mut modified_keystore = KeyStore::new(); 465 | modified_keystore.metadata_keys = keystore.metadata_keys.clone(); 466 | assert_eq!(test_data.as_bytes(), &modified_keystore.decrypt_archive_metadata(&archive_id, &metadata_ciphertext).unwrap()[..]); 467 | } 468 | } 469 | -------------------------------------------------------------------------------- /src/logger.rs: -------------------------------------------------------------------------------- 1 | use time; 2 | use std::fs::File; 3 | use std::io::Write; 4 | use std::sync::Mutex; 5 | use std::path::Path; 6 | 7 | 8 | pub struct Logger { 9 | log_level: log::LevelFilter, 10 | log_file: Option<Mutex<File>>, 11 | } 12 | 13 | impl log::Log for Logger { 14 | fn enabled(&self, metadata: &log::Metadata) -> bool { 15 | metadata.level() <= self.log_level 16 | } 17 | 18 | fn log(&self, record: &log::Record) { 19 | if !self.enabled(record.metadata()) { 20 | return; 21 | } 22 | 23 | let level = match record.level() { 24 | log::Level::Error => "ERROR", 25 | log::Level::Warn => "WARNING", 26 | log::Level::Info => "INFO", 27 | log::Level::Debug => "DEBUG", 28 | log::Level::Trace => "TRACE", 29 | }; 30 | 31 | let timestamp = time::strftime("%Y-%m-%d %H:%M:%S", &time::now()).unwrap(); 32 | 33 | // Log to the file if one was set 34 | if let Some(ref f) = self.log_file { 35 | let mut m = f.lock().unwrap(); 36 | write!(m, "[{}] {}: {}\n", timestamp, level, record.args()).unwrap(); 37 | } 38 | 39 | // Log to stdout 40 | match record.level() { 41 | log::Level::Info => println!("{}", record.args()), 42 | _ => println!("{}: {}", level, record.args()), 43 | } 44 | } 45 | 46 | fn flush(&self) { 47 | let stdout = std::io::stdout(); 48 | stdout.lock().flush().unwrap(); 49 | 50 | if let Some(ref f) = self.log_file { 51 | f.lock().unwrap().flush().unwrap(); 52 | } 53 | } 54 | } 55 | 56 | impl Logger { 57 | pub fn init<P: AsRef<Path>>(log_level: log::LevelFilter, log_file_path: Option<P>
) { 58 | let log_file = log_file_path.map(|path| { 59 | let file = File::create(path.as_ref()).unwrap_or_else(|err| { 60 | panic!("ERROR: Unable to open log file at '{}' for writing: {}", path.as_ref().display(), err) 61 | }); 62 | Mutex::new(file) 63 | }); 64 | 65 | log::set_boxed_logger(Box::new(Logger { 66 | log_level, 67 | log_file, 68 | })).unwrap_or_else(|err| { 69 | panic!("ERROR: Unable to initialize logger: {}", err) 70 | }); 71 | log::set_max_level(log_level); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod newtype_macros; 3 | mod keystore; 4 | mod archive; 5 | mod backend; 6 | mod cmds; 7 | mod logger; 8 | mod error; 9 | 10 | use crate::logger::Logger; 11 | use clap::{App, AppSettings, SubCommand, Arg, crate_version}; 12 | 13 | 14 | fn main() { 15 | let matches = App::new("preserve") 16 | .version(crate_version!()) 17 | .about("Robust, Encrypted Backup") 18 | .setting(AppSettings::SubcommandRequiredElseHelp) 19 | .setting(AppSettings::VersionlessSubcommands) 20 | .setting(AppSettings::UnifiedHelpMessage) 21 | .setting(AppSettings::ColoredHelp) 22 | .args_from_usage( 23 | "--logfile=[LOGFILE] 'Sets a file to write a log to' 24 | --verbose 'Be verbose'") 25 | .subcommand(SubCommand::with_name("create") 26 | .about("create a new backup") 27 | .setting(AppSettings::UnifiedHelpMessage) 28 | .setting(AppSettings::ColoredHelp) 29 | .args_from_usage( 30 | "--keyfile=<FILE> 'Sets the keyfile to use' 31 | --backend=<BACKEND> 'Sets the backend to use' 32 | --dereference 'Follow symlinks' 33 | --one-file-system 'Ignore things on other filesystems' 34 | <NAME> 'Unique name for this backup' 35 | <PATH> 'The path to backup'") 36 | .arg( 37 | Arg::with_name("exclude") 38 | .long("exclude") 39 | .takes_value(true) 40 | .multiple(true) 41 | .number_of_values(1) 42 | .help("Exclude the given path") 43 | ) 44 | ) 45 | .subcommand(SubCommand::with_name("keygen") 46 | .about("create a new keyfile") 47 | .setting(AppSettings::UnifiedHelpMessage) 48 | .setting(AppSettings::ColoredHelp) 49 | .args_from_usage( 50 | "--keyfile=[FILE] 'Write the new keyfile to FILE'") 51 | ) 52 | .subcommand(SubCommand::with_name("list") 53 | .about("list existing backups") 54 | .setting(AppSettings::UnifiedHelpMessage) 55 | .setting(AppSettings::ColoredHelp) 56 | .args_from_usage( 57 | "--keyfile=<FILE> 'Sets the keyfile to use' 58 | --backend=<BACKEND> 'Sets the backend to use'") 59 | ) 60 | .subcommand(SubCommand::with_name("restore") 61 | .about("restore an existing backup") 62 | .setting(AppSettings::UnifiedHelpMessage) 63 | .setting(AppSettings::ColoredHelp) 64 | .args_from_usage( 65 | "--keyfile=<FILE> 'Sets the keyfile to use' 66 | --backend=<BACKEND> 'Sets the backend to use' 67 | --hard-dereference 'Dereference hardlinks' 68 | --debug-decrypt 'Just fetch and decrypt the archive; no decompression, parsing, or extraction' 69 | <NAME> 'Name of the backup to restore' 70 | [PATH] 'Where to extract the backup to'") 71 | ) 72 | .subcommand(SubCommand::with_name("verify") 73 | .about("verify the integrity of an existing backup and all encrypted blocks it references") 74 | .setting(AppSettings::UnifiedHelpMessage) 75 | .setting(AppSettings::ColoredHelp) 76 | .args_from_usage( 77 | "--keyfile=<FILE> 'Sets the keyfile to use' 78 | --backend=<BACKEND> 'Sets the backend to use' 79 | <NAME> 'The name of the backup to verify'") 80 | ) 81 | .subcommand(SubCommand::with_name("diff") 82 | .about("diff two existing backups") 83 |
.setting(AppSettings::UnifiedHelpMessage) 84 | .setting(AppSettings::ColoredHelp) 85 | .args_from_usage( 86 | "--keyfile=<FILE> 'Sets the keyfile to use' 87 | --backend=<BACKEND> 'Sets the backend to use' 88 | <NAME1> 'The name of the first backup' 89 | <NAME2> 'The name of the second backup'") 90 | ) 91 | .get_matches(); 92 | 93 | Logger::init(log::LevelFilter::Info, matches.value_of("logfile")); 94 | 95 | match matches.subcommand() { 96 | ("create", Some(sub_m)) => cmds::create::execute(sub_m), 97 | ("keygen", Some(sub_m)) => cmds::keygen::execute(sub_m), 98 | ("list", Some(sub_m)) => cmds::list::execute(sub_m), 99 | ("restore", Some(sub_m)) => cmds::restore::execute(sub_m), 100 | ("verify", Some(sub_m)) => cmds::verify::execute(sub_m), 101 | ("diff", Some(sub_m)) => cmds::diff::execute(sub_m), 102 | _ => panic!("Unknown subcommand"), 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/newtype_macros.rs: -------------------------------------------------------------------------------- 1 | /* 2 | Copied and modified from github.com/dnaq/sodiumoxide 3 | 4 | Copyright (c) 2015 The sodiumoxide Developers 5 | 6 | License: MIT OR Apache-2.0 7 | */ 8 | 9 | 10 | macro_rules! newtype_clone (($newtype:ident) => ( 11 | impl Clone for $newtype { 12 | fn clone(&self) -> $newtype { 13 | let &$newtype(v) = self; 14 | $newtype(v) 15 | } 16 | } 17 | 18 | )); 19 | 20 | macro_rules! newtype_from_slice (($newtype:ident, $len:expr) => ( 21 | /// `from_slice()` creates an object from a byte slice 22 | /// 23 | /// This function will fail and return `None` if the length of 24 | /// the byte-slice isn't equal to the length of the object 25 | pub fn from_slice(bs: &[u8]) -> Option<$newtype> { 26 | if bs.len() != $len { 27 | return None; 28 | } 29 | let mut n = $newtype([0; $len]); 30 | for (ni, &bsi) in n.0.iter_mut().zip(bs.iter()) { 31 | *ni = bsi 32 | } 33 | Some(n) 34 | } 35 | 36 | #[allow(dead_code)] 37 | pub fn from_rng() -> $newtype { 38 | use rand::RngCore; 39 | let mut n = $newtype([0; $len]); 40 | { 41 | let $newtype(ref mut b) = n; 42 | ::rand::rngs::OsRng.fill_bytes(b); 43 | } 44 | n 45 | } 46 | 47 | /// Performs a constant time comparison 48 | pub fn constant_eq(&self, &$newtype(ref other): &$newtype) -> bool { 49 | use crypto::util::fixed_time_eq; 50 | let &$newtype(ref this) = self; 51 | fixed_time_eq(this, other) 52 | } 53 | )); 54 | 55 | macro_rules!
newtype_traits (($newtype:ident, $len:expr) => ( 56 | impl ::std::cmp::PartialEq for $newtype { 57 | fn eq(&self, other: &$newtype) -> bool { 58 | self.constant_eq(other) 59 | } 60 | } 61 | 62 | impl ::std::cmp::Eq for $newtype {} 63 | 64 | impl ::serde::Serialize for $newtype { 65 | fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error> 66 | where S: ::serde::Serializer 67 | { 68 | use ::data_encoding::HEXLOWER_PERMISSIVE; 69 | serializer.serialize_str(&HEXLOWER_PERMISSIVE.encode(&self[..])) 70 | } 71 | } 72 | 73 | impl<'de> ::serde::Deserialize<'de> for $newtype { 74 | fn deserialize<D>(deserializer: D) -> ::std::result::Result<$newtype, D::Error> 75 | where D: ::serde::Deserializer<'de> 76 | { 77 | struct NewtypeVisitor; 78 | impl<'de> ::serde::de::Visitor<'de> for NewtypeVisitor { 79 | type Value = $newtype; 80 | fn expecting(&self, formatter: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { 81 | write!(formatter, stringify!($newtype)) 82 | } 83 | 84 | fn visit_str<E>(self, v: &str) -> ::std::result::Result<Self::Value, E> 85 | where E: ::serde::de::Error 86 | { 87 | use ::data_encoding::HEXLOWER_PERMISSIVE; 88 | let slice = HEXLOWER_PERMISSIVE.decode(v.as_bytes()).map_err(::serde::de::Error::custom)?; 89 | $newtype::from_slice(&slice).ok_or(::serde::de::Error::invalid_length(slice.len(), &self)) 90 | } 91 | } 92 | 93 | deserializer.deserialize_str(NewtypeVisitor) 94 | } 95 | } 96 | 97 | /// Allows a user to access the byte contents of an object as a slice. 98 | /// 99 | /// WARNING: it might be tempting to do comparisons on objects 100 | /// by using `x[a..b] == y[a..b]`. This will open up for timing attacks 101 | /// when comparing for example authenticator tags. Because of this only 102 | /// use the comparison functions exposed by the sodiumoxide API. 103 | impl ::std::ops::Index<::std::ops::Range<usize>> for $newtype { 104 | type Output = [u8]; 105 | fn index(&self, _index: ::std::ops::Range<usize>) -> &[u8] { 106 | self.0.index(_index) 107 | } 108 | } 109 | /// Allows a user to access the byte contents of an object as a slice. 110 | /// 111 | /// WARNING: it might be tempting to do comparisons on objects 112 | /// by using `x[..b] == y[..b]`. This will open up for timing attacks 113 | /// when comparing for example authenticator tags. Because of this only 114 | /// use the comparison functions exposed by the sodiumoxide API. 115 | impl ::std::ops::Index<::std::ops::RangeTo<usize>> for $newtype { 116 | type Output = [u8]; 117 | fn index(&self, _index: ::std::ops::RangeTo<usize>) -> &[u8] { 118 | self.0.index(_index) 119 | } 120 | } 121 | /// Allows a user to access the byte contents of an object as a slice. 122 | /// 123 | /// WARNING: it might be tempting to do comparisons on objects 124 | /// by using `x[a..] == y[a..]`. This will open up for timing attacks 125 | /// when comparing for example authenticator tags. Because of this only 126 | /// use the comparison functions exposed by the sodiumoxide API. 127 | impl ::std::ops::Index<::std::ops::RangeFrom<usize>> for $newtype { 128 | type Output = [u8]; 129 | fn index(&self, _index: ::std::ops::RangeFrom<usize>) -> &[u8] { 130 | self.0.index(_index) 131 | } 132 | } 133 | /// Allows a user to access the byte contents of an object as a slice. 134 | /// 135 | /// WARNING: it might be tempting to do comparisons on objects 136 | /// by using `x[] == y[]`. This will open up for timing attacks 137 | /// when comparing for example authenticator tags. Because of this only 138 | /// use the comparison functions exposed by the sodiumoxide API.
139 | impl ::std::ops::Index<::std::ops::RangeFull> for $newtype { 140 | type Output = [u8]; 141 | fn index(&self, _index: ::std::ops::RangeFull) -> &[u8] { 142 | self.0.index(_index) 143 | } 144 | } 145 | 146 | impl ::std::fmt::Debug for $newtype { 147 | fn fmt(&self, 148 | formatter: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { 149 | write!(formatter, "{}({:?})", stringify!($newtype), &self[..]) 150 | } 151 | } 152 | )); 153 | 154 | macro_rules! public_newtype_traits (($newtype:ident) => ( 155 | impl AsRef<[u8]> for $newtype { 156 | #[inline] 157 | fn as_ref(&self) -> &[u8] { 158 | &self[..] 159 | } 160 | } 161 | impl ::std::cmp::PartialOrd for $newtype { 162 | #[inline] 163 | fn partial_cmp(&self, 164 | other: &$newtype) -> Option<::std::cmp::Ordering> { 165 | ::std::cmp::PartialOrd::partial_cmp(self.as_ref(), other.as_ref()) 166 | } 167 | #[inline] 168 | fn lt(&self, other: &$newtype) -> bool { 169 | ::std::cmp::PartialOrd::lt(self.as_ref(), other.as_ref()) 170 | } 171 | #[inline] 172 | fn le(&self, other: &$newtype) -> bool { 173 | ::std::cmp::PartialOrd::le(self.as_ref(), other.as_ref()) 174 | } 175 | #[inline] 176 | fn ge(&self, other: &$newtype) -> bool { 177 | ::std::cmp::PartialOrd::ge(self.as_ref(), other.as_ref()) 178 | } 179 | #[inline] 180 | fn gt(&self, other: &$newtype) -> bool { 181 | ::std::cmp::PartialOrd::gt(self.as_ref(), other.as_ref()) 182 | } 183 | } 184 | impl ::std::cmp::Ord for $newtype { 185 | #[inline] 186 | fn cmp(&self, other: &$newtype) -> ::std::cmp::Ordering { 187 | ::std::cmp::Ord::cmp(self.as_ref(), other.as_ref()) 188 | } 189 | } 190 | impl ::std::hash::Hash for $newtype { 191 | fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) { 192 | ::std::hash::Hash::hash(self.as_ref(), state) 193 | } 194 | } 195 | )); 196 | 197 | /// Macro used for generating newtypes of byte-arrays 198 | /// 199 | /// Usage: 200 | /// Generating secret datatypes, e.g. keys 201 | /// new_type! { 202 | /// /// This is some documentation for our type 203 | /// secret Key(KEYBYTES); 204 | /// } 205 | /// Generating public datatypes, e.g. public keys 206 | /// ``` 207 | /// new_type! { 208 | /// /// This is some documentation for our type 209 | /// public PublicKey(PUBLICKEYBYTES); 210 | /// } 211 | /// ``` 212 | macro_rules!
new_type { 213 | ( $(#[$meta:meta])* 214 | secret $name:ident($bytes:expr); 215 | ) => ( 216 | $(#[$meta])* 217 | #[must_use] 218 | pub struct $name(pub [u8; $bytes]); 219 | newtype_clone!($name); 220 | newtype_traits!($name, $bytes); 221 | impl $name { 222 | newtype_from_slice!($name, $bytes); 223 | } 224 | impl Drop for $name { 225 | fn drop(&mut self) { 226 | use crypto::util::secure_memset; 227 | let &mut $name(ref mut v) = self; 228 | secure_memset(v, 0); 229 | } 230 | } 231 | ); 232 | ( $(#[$meta:meta])* 233 | public $name:ident($bytes:expr); 234 | ) => ( 235 | $(#[$meta])* 236 | #[derive(Copy)] 237 | #[must_use] 238 | pub struct $name(pub [u8; $bytes]); 239 | newtype_clone!($name); 240 | newtype_traits!($name, $bytes); 241 | public_newtype_traits!($name); 242 | impl $name { 243 | newtype_from_slice!($name, $bytes); 244 | } 245 | ); 246 | } 247 | -------------------------------------------------------------------------------- /tests/docker-test/.gitignore: -------------------------------------------------------------------------------- 1 | exported-backup-image.tar 2 | logs 3 | src/preserve-src 4 | -------------------------------------------------------------------------------- /tests/docker-test/README.md: -------------------------------------------------------------------------------- 1 | This test suite uses Docker containers to do integration testing of Preserve. The Rust driven integration testing cannot fully isolate Preserve during tests, so it can't, for example, muck around with the OS itself, install packages, etc. This Docker based integration test can install packages in the container, back them up, and then do a restore in a fresh container to see if Preserve restores those system files. This is a nice way to do testing that is more akin to the real world (Preserve isn't really designed to backup packages, but it's a good test of its flexibility). 2 | 3 | 4 | How it works: 5 | * A docker image is created with preserve on it. 6 | * The image is run, backing up the system, storing the backup to a "backup" volume. It also installs some packages, backs up again, etc. 7 | * The image is run again with the same "backup" volume mounted, but performs a restore. 8 | * The restore container restores the latest backup and compares the result against the filesystem state of the backup container. 9 | * They restored filesystem and the backup container's filesystem should match, outside of expected variation (e.g. /dev isn't backed up). 10 | 11 | 12 | How to test: 13 | Run `test.py` and ensure that when it prints "---------- DIFF ----------" that the diff is empty. 
14 | -------------------------------------------------------------------------------- /tests/docker-test/src/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:latest 2 | 3 | RUN apt-get update && apt-get -y upgrade 4 | RUN apt-get -y install ca-certificates libsqlite3-dev 5 | 6 | RUN mkdir /backup 7 | RUN mkdir /restore 8 | 9 | WORKDIR /preserve 10 | 11 | ADD create-backup.sh ./ 12 | ADD restore-backup.sh ./ 13 | ADD preserve-src ./ 14 | 15 | RUN cargo build --release 16 | RUN cp target/release/preserve ./ -------------------------------------------------------------------------------- /tests/docker-test/src/create-backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./preserve keygen --keyfile /backup/keyfile 5 | ./preserve create --keyfile /backup/keyfile --backend file:///backup --exclude /backup testbackup1 / 6 | apt-get install -y fish 7 | ./preserve create --keyfile /backup/keyfile --backend file:///backup --exclude /backup testbackup2 / 8 | -------------------------------------------------------------------------------- /tests/docker-test/src/restore-backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./preserve restore --keyfile /backup/keyfile --backend file:///backup testbackup2 /restore 5 | 6 | # Compare 7 | # NOTE: We filter out messages about missing /dev, /proc, /sys, and /backup, because those aren't included 8 | # in the backup (no point in backing those up). 9 | # NOTE: We filter out messages about /etc/hostname, /etc/hosts, and /etc/resolv.conf because those are 10 | # mounted files in a Docker container. 11 | # NOTE: We filter out messages about Gid differing because Preserve does not currently support restoring 12 | # uid/gid. 13 | DIFFOUTPUT=`tar --diff -f /exported-backup-image.tar -C /restore |& grep -vE "^tar: (dev|proc|sys|backup|run|tmp)[:/].*No such file or directory$" | grep -vE "^etc/(hostname|hosts|resolv.conf): .*$" | grep -v "^.*Gid differs$" | grep -v "^tar: preserve/cache.sqlite: .*$" | grep -v "^preserve/log.txt: .*$" || true` 14 | 15 | >&2 echo "" 16 | >&2 echo "---------- DIFF ----------" 17 | >&2 echo "$DIFFOUTPUT" 18 | >&2 echo "" 19 | >&2 echo "--------------------------" -------------------------------------------------------------------------------- /tests/docker-test/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # TODO: Use a Docker library instead 3 | import subprocess 4 | import os 5 | import tempfile 6 | import shutil 7 | 8 | 9 | def main(): 10 | os.makedirs("logs", exist_ok=True) 11 | 12 | # Create temporary folders to hold the backup data and the restored result 13 | backup_dir = tempfile.mkdtemp(prefix="preserve-docker-test-backup-") 14 | 15 | # NOTE: On macOS (as of 2019.01.18) mktemp et al. return a path starting with '/var'. 16 | # Docker for macOS, by default, won't mount '/var' paths. 17 | # But '/var' is actually a symlink to '/private' which Docker for macOS will mount. 18 | # So we use realpath to resolve the symlink.
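# e.g. '/var/folders/T/preserve-docker-test-backup-abc123' resolves to
# '/private/var/folders/T/preserve-docker-test-backup-abc123' (illustrative path).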
19 | backup_dir = os.path.realpath(backup_dir) 20 | 21 | # Copy preserve source code for Docker 22 | shutil.rmtree('src/preserve-src', ignore_errors=True) 23 | shutil.copytree('../../src', 'src/preserve-src/src') 24 | shutil.copy('../../Cargo.toml', 'src/preserve-src/') 25 | shutil.copy('../../Cargo.lock', 'src/preserve-src/') 26 | 27 | # Build the test image 28 | print("Building Docker image...") 29 | run_command(['docker', 'build', '-t', 'preserve-test', '-f', 'src/Dockerfile', 'src'], logpath='logs/docker.build.log') 30 | 31 | # Backup 32 | print("Running backup container...") 33 | run_command(['docker', 'run', '-v', "{}:/backup".format(backup_dir), '--name', 'preserve-test-backup', 'preserve-test', 'bash', 'create-backup.sh'], logpath='logs/docker.run.backup.log') 34 | run_command(['docker', 'export', 'preserve-test-backup'], logpath='exported-backup-image.tar') 35 | run_command(['docker', 'rm', 'preserve-test-backup']) 36 | 37 | # Restore and Compare 38 | # Comparison results are printed to stderr 39 | print("Running restore container...") 40 | run_command(['docker', 'run', '-v', "{}:/backup".format(backup_dir), '-v', '{}:/exported-backup-image.tar'.format(os.path.join(os.getcwd(), 'exported-backup-image.tar')), '--rm', 'preserve-test', 'bash', 'restore-backup.sh'], logpath='logs/docker.run.restore.log') 41 | 42 | # Done 43 | print() 44 | print("NOTE: ") 45 | print("Left behind the following folders/files, in case they are needed for inspection:") 46 | print(backup_dir) 47 | print("exported-backup-image.tar") 48 | print("logs/*") 49 | 50 | 51 | def run_command(args, logpath=None, stderr=None, check=True): 52 | result = subprocess.run(args, stdout=subprocess.PIPE, check=check, stderr=stderr) 53 | if logpath is not None: 54 | with open(logpath, 'wb') as f: 55 | f.write(result.stdout) 56 | 57 | return result 58 | 59 | 60 | main() -------------------------------------------------------------------------------- /tests/test.rs: -------------------------------------------------------------------------------- 1 | extern crate tempfile; 2 | extern crate rand; 3 | extern crate libc; 4 | 5 | use rand::prelude::*; 6 | use std::process::Command; 7 | use std::fs::{self, File}; 8 | use std::path::{Path, PathBuf}; 9 | use std::io::{Write, BufWriter}; 10 | use tempfile::TempDir; 11 | use std::os::unix::fs::{DirBuilderExt, OpenOptionsExt}; 12 | use std::cmp; 13 | use std::os::unix; 14 | 15 | 16 | #[test] 17 | fn integration_test_1() { 18 | let working_dir = tempfile::Builder::new().prefix("preserve-test").tempdir().unwrap(); 19 | let backend_dir = tempfile::Builder::new().prefix("preserve-test").tempdir().unwrap(); 20 | 21 | let test_config = TestConfig { 22 | bin: Path::new("target/debug/preserve").canonicalize().unwrap(), 23 | working_dir: working_dir.path().to_path_buf(), 24 | backend_dir: backend_dir.path().to_path_buf(), 25 | }; 26 | 27 | // Generate keyfile 28 | test_config.init(); 29 | 30 | // Test case 31 | let original_dir = TestGenerator::new().generate_test_case(); 32 | 33 | // First test 34 | { 35 | test_config.create("test1", original_dir.path()); 36 | let restore_dir = test_config.restore("test1"); 37 | match compare_dirs(original_dir.path(), restore_dir.path()) { 38 | Ok(_) => (), 39 | Err(err) => handle_failed_restore(original_dir.path(), restore_dir.path(), "Restored directory did not match original directory", &err), 40 | }; 41 | } 42 | 43 | // Add a file 44 | { 45 | let mut file = File::create(original_dir.path().join("test.txt")).unwrap(); 46 | file.write_all(b"This is a new 
file").unwrap(); 47 | } 48 | 49 | // Test again 50 | { 51 | test_config.create("test2", original_dir.path()); 52 | let restore_dir = test_config.restore("test2"); 53 | match compare_dirs(original_dir.path(), restore_dir.path()) { 54 | Ok(_) => (), 55 | Err(err) => handle_failed_restore(original_dir.path(), restore_dir.path(), "Restored directory did not match original directory with added file", &err), 56 | }; 57 | } 58 | 59 | // Modify a file 60 | { 61 | let mut file = File::create(original_dir.path().join("test.txt")).unwrap(); 62 | file.write_all(b"This is a different file").unwrap(); 63 | } 64 | 65 | // Test again 66 | { 67 | test_config.create("test3", original_dir.path()); 68 | let restore_dir = test_config.restore("test3"); 69 | match compare_dirs(original_dir.path(), restore_dir.path()) { 70 | Ok(_) => (), 71 | Err(err) => handle_failed_restore(original_dir.path(), restore_dir.path(), "Restored directory did not match original directory with modified file", &err), 72 | }; 73 | } 74 | 75 | // Check old backup 76 | let original_dir = TestGenerator::new().generate_test_case(); 77 | 78 | { 79 | let restore_dir = test_config.restore("test1"); 80 | match compare_dirs(original_dir.path(), restore_dir.path()) { 81 | Ok(_) => (), 82 | Err(err) => handle_failed_restore(original_dir.path(), restore_dir.path(), "Restored directory did not match old original directory", &err), 83 | }; 84 | } 85 | 86 | // Inverse test to make sure things are working as expected 87 | { 88 | let restore_dir = test_config.restore("test2"); 89 | match compare_dirs(original_dir.path(), restore_dir.path()) { 90 | Ok(_) => handle_failed_restore(original_dir.path(), restore_dir.path(), "Restored test2 should not match old original directory", ""), 91 | Err(_) => (), 92 | }; 93 | } 94 | } 95 | 96 | 97 | // Dump our testcase to a folder so we can inspect it 98 | #[test] 99 | #[ignore] 100 | fn dump_test_case() { 101 | // Test case 102 | let original_dir = TestGenerator::new().generate_test_case(); 103 | 104 | // Save the test case 105 | panic!("Saved to: {:?}", original_dir.into_path()); 106 | } 107 | 108 | 109 | // Information about the current test, such as the temporary directories where we're storing 110 | // the keyfile, backend path, etc. 
111 | struct TestConfig { 112 | pub bin: PathBuf, 113 | pub working_dir: PathBuf, 114 | pub backend_dir: PathBuf, 115 | } 116 | 117 | impl TestConfig { 118 | pub fn init(&self) { 119 | Command::new(&self.bin) 120 | .current_dir(&self.working_dir) 121 | .arg("keygen") 122 | .arg("--keyfile").arg("keyfile") 123 | .output().unwrap(); 124 | } 125 | 126 | pub fn create<P: AsRef<Path>>(&self, backup_name: &str, path: P) { 127 | let output = Command::new(&self.bin) 128 | .current_dir(&self.working_dir) 129 | .arg("create") 130 | .arg("--keyfile").arg("keyfile") 131 | .arg("--backend").arg("file://".to_string() + &self.backend_dir.to_string_lossy()) 132 | .arg(backup_name) 133 | .arg(path.as_ref()) 134 | .output().unwrap(); 135 | 136 | println!("create-stdout: {}", String::from_utf8_lossy(&output.stdout)); 137 | println!("create-stderr: {}", String::from_utf8_lossy(&output.stderr)); 138 | } 139 | 140 | pub fn restore(&self, backup_name: &str) -> TempDir { 141 | let restore_dir = tempfile::Builder::new().prefix("preserve-test").tempdir().unwrap(); 142 | 143 | let output = Command::new(&self.bin) 144 | .current_dir(&self.working_dir) 145 | .arg("restore") 146 | .arg("--keyfile").arg("keyfile") 147 | .arg("--backend").arg("file://".to_string() + &self.backend_dir.to_string_lossy()) 148 | .arg(backup_name) 149 | .arg(restore_dir.path()) 150 | .output().unwrap(); 151 | 152 | println!("restore-stdout: {}", String::from_utf8_lossy(&output.stdout)); 153 | println!("restore-stderr: {}", String::from_utf8_lossy(&output.stderr)); 154 | 155 | restore_dir 156 | } 157 | } 158 | 159 | 160 | struct TestGenerator { 161 | rng: Box<dyn RngCore>, 162 | } 163 | 164 | 165 | impl TestGenerator { 166 | fn new() -> TestGenerator { 167 | TestGenerator { 168 | // We don't need the rng used here to be cryptographically secure. StdRng is just chosen because it's seedable and fairly performant. 169 | // We want something seedable so that the generated test cases are somewhat deterministic. Determinism is helpful for debugging. 170 | // It'd be a pain to debug tests when the test cases keep changing. We don't care about determinism between platforms and future versions. 171 | rng: Box::new(StdRng::seed_from_u64(42)), 172 | } 173 | } 174 | 175 | // Create a temporary folder, fill it with our testcase, and return it. 176 | // The testcase is generated randomly; random file tree with random names, contents, lengths, permissions, etc. 177 | // There are, however, some minimum requirements. There will always be at least one empty file, some symlinks, and some hardlinks. 178 | // Generation is deterministic. 179 | // The generation is performed by filling a folder with a random number of files, folders, symlinks, and hardlinks. 180 | // Each generated folder is then recursively filled the same way.
181 | // TODO: Add requirement for bad symlinks 182 | fn generate_test_case(&mut self) -> TempDir { 183 | let basepath = tempfile::Builder::new().prefix("preserve-test").tempdir().unwrap(); 184 | let mut number_of_symlinks = 0; 185 | let mut number_of_hardlinks = 0; 186 | let mut number_of_empty_files = 0; 187 | let mut all_files = Vec::new(); 188 | let mut all_folders = Vec::new(); 189 | let mut tasks = Vec::new(); 190 | 191 | tasks.push(basepath.path().to_path_buf()); 192 | 193 | while let Some(parent) = tasks.pop() { 194 | // Prevent generating too deep 195 | let num_nodes_to_generate = if parent.strip_prefix(basepath.path()).unwrap().components().count() >= 3 { 196 | 0 197 | } else { 198 | self.rng.gen_range(0, 18) 199 | }; 200 | 201 | for _ in 0..num_nodes_to_generate { 202 | let filename = self.generate_random_name(); 203 | let path = parent.join(filename); 204 | 205 | 206 | match self.rng.gen_range(0, 100) { 207 | 0 ..= 49 => { 208 | // File: 50% 209 | self.generate_random_file(&path); 210 | 211 | if path.metadata().unwrap().len() == 0 { 212 | number_of_empty_files += 1; 213 | } 214 | 215 | all_files.push(path); 216 | }, 217 | 50 ..= 79 => { 218 | // Folder: 30% 219 | self.generate_random_folder(&path); 220 | all_folders.push(path.clone()); 221 | tasks.push(path.clone()); 222 | }, 223 | 80 ..= 89 => { 224 | // Symlink: 10% 225 | self.generate_random_symlink(&path, &all_files, &all_folders); 226 | number_of_symlinks += 1; 227 | }, 228 | _ => { 229 | // Hardlink: 10% 230 | if self.generate_random_hardlink(&path, &all_files) { 231 | number_of_hardlinks += 1; 232 | } 233 | }, 234 | } 235 | } 236 | 237 | // If our minimum requirements for the test case have not been met, then try again. 238 | if tasks.is_empty() && (number_of_symlinks < 3 || number_of_hardlinks < 3 || number_of_empty_files < 1 || all_folders.is_empty()) { 239 | tasks.push(basepath.path().to_path_buf()); 240 | } 241 | } 242 | 243 | // Set random times for all the folders 244 | // We do this after, because generating the contents of the folders will undo what we set. 245 | // We do this in reverse, so that we set the children times before the parent folder. 246 | all_folders.reverse(); 247 | 248 | for folder in all_folders { 249 | self.set_random_filetime(folder); 250 | } 251 | 252 | basepath 253 | } 254 | 255 | // Generate a random file name 256 | // Random length. Randomly includes unicode. 257 | fn generate_random_name(&mut self) -> String { 258 | let alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-."; 259 | let alphabet: Vec<char> = alphabet.chars().collect(); 260 | let mut name = String::new(); 261 | let len = self.rng.gen_range(1, 256); 262 | 263 | loop { 264 | // 12% chance of being random alphanumeric unicode char, otherwise use alphabet above 265 | let c: char = if self.rng.next_u32() < 0x2000_0000 { 266 | (&mut self.rng).sample_iter(&rand::distributions::Standard).filter(|c: &char| c.is_alphanumeric()).next().unwrap() 267 | } else { 268 | *alphabet.choose(&mut self.rng).unwrap() 269 | }; 270 | 271 | name.push(c); 272 | 273 | if name.len() == len { 274 | return name; 275 | } 276 | 277 | if name.len() > len { 278 | // We want a specific number of bytes (not chars) and the last char went too far. Try again. 279 | name.pop(); 280 | } 281 | } 282 | } 283 | 284 | // Generate a random file at the given path. 285 | // Length, contents, permissions, etc. will be random.
284 | 	// Generate a random file at the given path.
285 | 	// Length, contents, permissions, etc. will be random.
286 | 	fn generate_random_file<P: AsRef<Path>>(&mut self, path: P) {
287 | 		let mode = (self.rng.next_u32() & 511) | 0o600;
288 | 		let len = match self.rng.gen_range(0, 100) {
289 | 			0 ..= 9 => 0,                              // Empty (10%)
290 | 			10 ..= 59 => self.rng.gen_range(1, 1024),       // Small (50%)
291 | 			60 ..= 89 => self.rng.gen_range(1, 2*1024*1024),  // Medium (30%)
292 | 			_ => self.rng.gen_range(1, 32*1024*1024),       // Large (10%)
293 | 		};
294 | 
295 | 		// Generate alphanumeric data sometimes
296 | 		let is_ascii = self.rng.gen_range(0, 2) == 0;
297 | 
298 | 		{
299 | 			let file = fs::OpenOptions::new().write(true).create(true).mode(mode).open(path.as_ref()).unwrap();
300 | 			let mut writer = BufWriter::new(file);
301 | 			let mut written = 0;
302 | 			let mut buffer = [0u8; 4096];
303 | 
304 | 			while written < len {
305 | 				let chunk_size = cmp::min(buffer.len(), len - written);
306 | 
307 | 				if is_ascii {
308 | 					let string_data: String = (&mut self.rng).sample_iter(&rand::distributions::Alphanumeric).take(chunk_size).collect();
309 | 					writer.write_all(string_data.as_bytes()).unwrap();
310 | 					written += string_data.len();
311 | 				}
312 | 				else {
313 | 					self.rng.fill_bytes(&mut buffer);
314 | 					writer.write_all(&buffer[..chunk_size]).unwrap();
315 | 					written += chunk_size;
316 | 				}
317 | 			}
318 | 		}
319 | 
320 | 		self.set_random_filetime(path);
321 | 	}
322 | 
323 | 	fn generate_random_folder<P: AsRef<Path>>(&mut self, path: P) {
324 | 		let mode = (self.rng.next_u32() & 511) | 0o700;
325 | 		fs::DirBuilder::new().mode(mode).create(path).unwrap();
326 | 	}
327 | 
328 | 	// Generate a symlink at the given path, linking randomly to one of the potential_folders or potential_files.
329 | 	// The link's path to the target will be relative.
330 | 	// If no targets are possible, or randomly, a bad symlink will be generated
331 | 	fn generate_random_symlink<P: AsRef<Path>>(&mut self, path: P, potential_files: &[PathBuf], potential_folders: &[PathBuf]) {
332 | 		let target = if self.rng.gen() {
333 | 			if self.rng.gen() {
334 | 				None // Bad symlink
335 | 			} else {
336 | 				potential_files.choose(&mut self.rng)
337 | 			}
338 | 		} else {
339 | 			potential_folders.choose(&mut self.rng)
340 | 		};
341 | 
342 | 		let target = match target {
343 | 			Some(target) => TestGenerator::calculate_relative_path(path.as_ref().parent().unwrap(), target),
344 | 			None => PathBuf::from(self.generate_random_name()), // Bad symlink
345 | 		};
346 | 
347 | 		unix::fs::symlink(target, path.as_ref()).unwrap();
348 | 		self.set_random_filetime(path.as_ref());
349 | 	}
350 | 
351 | 	// Returns a path relative to from (must be a directory) which gets to to (either directory or file)
352 | 	fn calculate_relative_path<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> PathBuf {
353 | 		let mut result = String::new();
354 | 		let mut current = from.as_ref().to_path_buf();
355 | 
356 | 		loop {
357 | 			match to.as_ref().strip_prefix(&current) {
358 | 				Ok(remaining) => {
359 | 					let final_result = PathBuf::from(result).join(remaining);
360 | 
361 | 					return if final_result.to_string_lossy() == "" {
362 | 						PathBuf::from("./")
363 | 					} else {
364 | 						final_result
365 | 					};
366 | 				},
367 | 				Err(_) => (),
368 | 			}
369 | 
370 | 			result.push_str("../");
371 | 			current.pop();
372 | 		}
373 | 	}
374 | 
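	// Illustrative sketch (not part of the original file): two concrete cases for
	// calculate_relative_path, using hypothetical paths, including the "./" result
	// when the target is the starting directory itself.
	#[allow(dead_code)]
	fn _relative_path_examples() {
		assert_eq!(TestGenerator::calculate_relative_path("/a/b", "/a/c/d.txt"), PathBuf::from("../c/d.txt"));
		assert_eq!(TestGenerator::calculate_relative_path("/a/b", "/a/b"), PathBuf::from("./"));
	}
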
375 | 	// Generate a hardlink at the given path, linking randomly to one of the potential_files.
376 | 	// Returns false when potential_files is empty
377 | 	fn generate_random_hardlink<P: AsRef<Path>>(&mut self, path: P, potential_files: &[PathBuf]) -> bool {
378 | 		match potential_files.choose(&mut self.rng) {
379 | 			Some(target) => {
380 | 				fs::hard_link(target, path).unwrap();
381 | 				true
382 | 			},
383 | 			None => false,
384 | 		}
385 | 	}
386 | 
387 | 	fn generate_random_filetime(&mut self) -> (i64, i64) {
388 | 		let base_time = 1456713592;
389 | 
390 | 		(self.rng.gen_range(base_time-25600000, base_time+25600000), self.rng.gen_range(0, 1000000000))
391 | 	}
392 | 
393 | 	fn set_random_filetime<P: AsRef<Path>>(&mut self, path: P) {
394 | 		let (time, time_nsec) = self.generate_random_filetime();
395 | 
396 | 		TestGenerator::set_file_time(path, time, time_nsec);
397 | 	}
398 | 
399 | 	// Does not follow symlinks, so if used on a symlink it will set the mtime
400 | 	// for the link itself, rather than the file the link points to.
401 | 	fn set_file_time<P: AsRef<Path>>(path: P, mtime: i64, mtime_nsec: i64) {
402 | 		use std::ffi::CString;
403 | 		use std::os::unix::prelude::*;
404 | 		use libc::{time_t, timespec, utimensat, c_long, AT_FDCWD, AT_SYMLINK_NOFOLLOW};
405 | 		use std::io;
406 | 
407 | 		let times = [timespec {
408 | 			tv_sec: mtime as time_t,
409 | 			tv_nsec: mtime_nsec as c_long,
410 | 		},
411 | 		timespec {
412 | 			tv_sec: mtime as time_t,
413 | 			tv_nsec: mtime_nsec as c_long,
414 | 		}];
415 | 		let p = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
416 | 
417 | 		unsafe {
418 | 			if utimensat(AT_FDCWD, p.as_ptr() as *const _, times.as_ptr(), AT_SYMLINK_NOFOLLOW) == 0 {
419 | 				Ok(())
420 | 			} else {
421 | 				Err(io::Error::last_os_error())
422 | 			}
423 | 		}.unwrap();
424 | 	}
425 | }
426 | 
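// Illustrative sketch (not part of the original file) of how the helpers above and
// the functions below are meant to compose into a round-trip test. The binary path
// is a placeholder assumption; the suite's real entry points live elsewhere in this file.
#[allow(dead_code)]
fn _roundtrip_sketch() {
	let working_dir = tempfile::Builder::new().prefix("preserve-test").tempdir().unwrap();
	let backend_dir = tempfile::Builder::new().prefix("preserve-test").tempdir().unwrap();
	let config = TestConfig {
		bin: PathBuf::from("target/debug/preserve"), // assumed location of the built binary
		working_dir: working_dir.path().to_path_buf(),
		backend_dir: backend_dir.path().to_path_buf(),
	};

	config.init();

	// Generate a deterministic random tree, back it up, restore it, and compare.
	let mut generator = TestGenerator::new();
	let original_dir = generator.generate_test_case();
	config.create("test1", original_dir.path());
	let restore_dir = config.restore("test1");

	if let Err(err) = compare_dirs(original_dir.path(), restore_dir.path()) {
		handle_failed_restore(original_dir.path(), restore_dir.path(), "Restored directory did not match the original", &err);
	}
}
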
./" => output_lines.next().unwrap_or("x") == "", 475 | "" => true, 476 | _ => false, 477 | }; 478 | same &= output_lines.next().unwrap_or("x").starts_with("sent "); 479 | same &= output_lines.next().unwrap_or("x").starts_with("total "); 480 | 481 | same 482 | } 483 | 484 | // We use temporary directories for everything, so in the case of failure save the directories 485 | // for inspection, and then panic with the error message. 486 | fn handle_failed_restore, Q: AsRef>(original_dir: P, restore_dir: Q, reason: &str, err: &str) { 487 | let random_str: String = rand::thread_rng().sample_iter(&rand::distributions::Alphanumeric).take(10).collect(); 488 | 489 | fs::rename(original_dir, "/tmp/preserve-test-failed-original-".to_string() + &random_str).unwrap(); 490 | fs::rename(restore_dir, "/tmp/preserve-test-failed-restore-".to_string() + &random_str).unwrap(); 491 | panic!("{}\nOriginal and Restore directories have been saved for inspection: /tmp/preserve-test-failed-*-{}\nrsync output:\n{}", reason, random_str, err) 492 | } 493 | --------------------------------------------------------------------------------