├── .gitattributes ├── .gitignore ├── .travis.yml ├── AUTHORS ├── CHANGES ├── LICENSE ├── MANIFEST.in ├── README.rst ├── attic ├── __init__.py ├── _chunker.c ├── _hashindex.c ├── _version.py ├── archive.py ├── archiver.py ├── cache.py ├── chunker.pyx ├── crypto.pyx ├── fuse.py ├── hashindex.pyx ├── helpers.py ├── key.py ├── lrucache.py ├── platform.py ├── platform_darwin.pyx ├── platform_freebsd.pyx ├── platform_linux.pyx ├── remote.py ├── repository.py ├── testsuite │ ├── __init__.py │ ├── archive.py │ ├── archiver.py │ ├── chunker.py │ ├── crypto.py │ ├── hashindex.py │ ├── helpers.py │ ├── key.py │ ├── lrucache.py │ ├── mock.py │ ├── platform.py │ ├── repository.py │ ├── run.py │ └── xattr.py └── xattr.py ├── docs ├── Makefile ├── _themes │ └── attic │ │ ├── sidebarlogo.html │ │ ├── sidebarusefullinks.html │ │ ├── static │ │ └── attic.css_t │ │ └── theme.conf ├── conf.py ├── faq.rst ├── foreword.rst ├── global.rst.inc ├── index.rst ├── installation.rst ├── quickstart.rst ├── update_usage.sh └── usage.rst ├── scripts └── attic ├── setup.py ├── tox.ini └── versioneer.py /.gitattributes: -------------------------------------------------------------------------------- 1 | attic/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | MANIFEST 2 | docs/_build 3 | build 4 | dist 5 | env 6 | .tox 7 | hashindex.c 8 | chunker.c 9 | *.egg-info 10 | *.pyc 11 | *.pyo 12 | *.so 13 | docs/usage/*.inc 14 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.2" 4 | - "3.3" 5 | - "3.4" 6 | # command to install dependencies 7 | install: 8 | - "sudo apt-get install -y libacl1-dev" 9 | - "pip install --use-mirrors Cython" 10 | - "pip install -e ." 
11 | # command to run tests 12 | script: fakeroot -u python -m attic.testsuite.run -vb 13 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Attic is written and maintained by Jonas Borgström and 2 | various contributors: 3 | 4 | Development Lead 5 | ```````````````` 6 | 7 | - Jonas Borgström 8 | 9 | Patches and Suggestions 10 | ``````````````````````` 11 | - Brian Johnson 12 | - Cyril Roussillon 13 | - Dan Christensen 14 | - Jeremy Maitin-Shepard 15 | - Johann Klähn 16 | - Petros Moisiadis 17 | - Thomas Waldmann 18 | -------------------------------------------------------------------------------- /CHANGES: -------------------------------------------------------------------------------- 1 | Attic Changelog 2 | =============== 3 | 4 | Here you can see the full list of changes between each Attic release. 5 | 6 | Version 0.17 7 | ------------ 8 | 9 | (bugfix release, released on X) 10 | - Fix hashindex ARM memory alignment issue (#309) 11 | - Improve hashindex error messages (#298) 12 | 13 | Version 0.16 14 | ------------ 15 | 16 | (bugfix release, released on May 16, 2015) 17 | - Fix typo preventing the security confirmation prompt from working (#303) 18 | - Improve handling of systems with improperly configured file system encoding (#289) 19 | - Fix "All archives" output for attic info. (#183) 20 | - More user friendly error message when repository key file is not found (#236) 21 | - Fix parsing of iso 8601 timestamps with zero microseconds (#282) 22 | 23 | Version 0.15 24 | ------------ 25 | 26 | (bugfix release, released on Apr 15, 2015) 27 | - xattr: Be less strict about unknown/unsupported platforms (#239) 28 | - Reduce repository listing memory usage (#163). 
29 | - Fix BrokenPipeError for remote repositories (#233) 30 | - Fix incorrect behavior with two character directory names (#265, #268) 31 | - Require approval before accessing relocated/moved repository (#271) 32 | - Require approval before accessing previously unknown unencrypted repositories (#271) 33 | - Fix issue with hash index files larger than 2GB. 34 | - Fix Python 3.2 compatibility issue with noatime open() (#164) 35 | - Include missing pyx files in dist files (#168) 36 | 37 | Version 0.14 38 | ------------ 39 | 40 | (feature release, released on Dec 17, 2014) 41 | - Added support for stripping leading path segments (#95) 42 | "attic extract --strip-segments X" 43 | - Add workaround for old Linux systems without acl_extended_file_no_follow (#96) 44 | - Add MacPorts' path to the default openssl search path (#101) 45 | - HashIndex improvements, eliminates unnecessary IO on low memory systems. 46 | - Fix "Number of files" output for attic info. (#124) 47 | - limit create file permissions so files aren't read while restoring 48 | - Fix issue with empty xattr values (#106) 49 | 50 | Version 0.13 51 | ------------ 52 | 53 | (feature release, released on Jun 29, 2014) 54 | 55 | - Fix sporadic "Resource temporarily unavailable" when using remote repositories 56 | - Reduce file cache memory usage (#90) 57 | - Faster AES encryption (utilizing AES-NI when available) 58 | - Experimental Linux, OS X and FreeBSD ACL support (#66) 59 | - Added support for backup and restore of BSDFlags (OSX, FreeBSD) (#56) 60 | - Fix bug where xattrs on symlinks were not correctly restored 61 | - Added cachedir support. 
CACHEDIR.TAG compatible cache directories 62 | can now be excluded using ``--exclude-caches`` (#74) 63 | - Fix crash on extreme mtime timestamps (year 2400+) (#81) 64 | - Fix Python 3.2 specific lockf issue (EDEADLK) 65 | 66 | Version 0.12 67 | ------------ 68 | 69 | (feature release, released on April 7, 2014) 70 | 71 | - Python 3.4 support (#62) 72 | - Various documentation improvements a new style 73 | - ``attic mount`` now supports mounting an entire repository not only 74 | individual archives (#59) 75 | - Added option to restrict remote repository access to specific path(s): 76 | ``attic serve --restrict-to-path X`` (#51) 77 | - Include "all archives" size information in "--stats" output. (#54) 78 | - Added ``--stats`` option to ``attic delete`` and ``attic prune`` 79 | - Fixed bug where ``attic prune`` used UTC instead of the local time zone 80 | when determining which archives to keep. 81 | - Switch to SI units (Power of 1000 instead 1024) when printing file sizes 82 | 83 | Version 0.11 84 | ------------ 85 | 86 | (feature release, released on March 7, 2014) 87 | 88 | - New "check" command for repository consistency checking (#24) 89 | - Documentation improvements 90 | - Fix exception during "attic create" with repeated files (#39) 91 | - New "--exclude-from" option for attic create/extract/verify. 92 | - Improved archive metadata deduplication. 93 | - "attic verify" has been deprecated. Use "attic extract --dry-run" instead. 94 | - "attic prune --hourly|daily|..." has been deprecated. 95 | Use "attic prune --keep-hourly|daily|..." instead. 96 | - Ignore xattr errors during "extract" if not supported by the filesystem. (#46) 97 | 98 | Version 0.10 99 | ------------ 100 | 101 | (bugfix release, released on Jan 30, 2014) 102 | 103 | - Fix deadlock when extracting 0 sized files from remote repositories 104 | - "--exclude" wildcard patterns are now properly applied to the full path 105 | not just the file name part (#5). 
106 | - Make source code endianness agnostic (#1) 107 | 108 | Version 0.9 109 | ----------- 110 | 111 | (feature release, released on Jan 23, 2014) 112 | 113 | - Remote repository speed and reliability improvements. 114 | - Fix sorting of segment names to ignore NFS left over files. (#17) 115 | - Fix incorrect display of time (#13) 116 | - Improved error handling / reporting. (#12) 117 | - Use fcntl() instead of flock() when locking repository/cache. (#15) 118 | - Let ssh figure out port/user if not specified so we don't override .ssh/config (#9) 119 | - Improved libcrypto path detection (#23). 120 | 121 | Version 0.8.1 122 | ------------- 123 | 124 | (bugfix release, released on Oct 4, 2013) 125 | 126 | - Fix segmentation fault issue. 127 | 128 | Version 0.8 129 | ----------- 130 | 131 | (feature release, released on Oct 3, 2013) 132 | 133 | - Fix xattr issue when backing up sshfs filesystems (#4) 134 | - Fix issue with excessive index file size (#6) 135 | - Support access of read only repositories. 136 | - New syntax to enable repository encryption: 137 | attic init --encryption="none|passphrase|keyfile". 138 | - Detect and abort if repository is older than the cache. 139 | 140 | 141 | Version 0.7 142 | ----------- 143 | 144 | (feature release, released on Aug 5, 2013) 145 | 146 | - Ported to FreeBSD 147 | - Improved documentation 148 | - Experimental: Archives mountable as fuse filesystems. 149 | - The "user." prefix is no longer stripped from xattrs on Linux 150 | 151 | 152 | Version 0.6.1 153 | ------------- 154 | 155 | (bugfix release, released on July 19, 2013) 156 | 157 | - Fixed an issue where mtime was not always correctly restored. 
158 | 159 | 160 | Version 0.6 161 | ----------- 162 | 163 | First public release on July 9, 2013 164 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2010-2014 Jonas Borgström 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions 6 | are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 2. Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in 12 | the documentation and/or other materials provided with the 13 | distribution. 14 | 3. The name of the author may not be used to endorse or promote 15 | products derived from this software without specific prior 16 | written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS 19 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 22 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE 24 | GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 26 | IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 27 | OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 28 | IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst LICENSE CHANGES MANIFEST.in versioneer.py 2 | recursive-include attic *.pyx 3 | recursive-include docs * 4 | recursive-exclude docs *.pyc 5 | recursive-exclude docs *.pyo 6 | prune docs/_build 7 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | What is Attic? 2 | -------------- 3 | Attic is a deduplicating backup program. The main goal of Attic is to provide 4 | an efficient and secure way to backup data. The data deduplication 5 | technique used makes Attic suitable for daily backups since only changes 6 | are stored. 7 | 8 | Easy to use 9 | ~~~~~~~~~~~ 10 | Initialize backup repository and create a backup archive:: 11 | 12 | $ attic init /usbdrive/my-backup.attic 13 | $ attic create -v /usbdrive/my-backup.attic::documents ~/Documents 14 | 15 | Main features 16 | ~~~~~~~~~~~~~ 17 | Space efficient storage 18 | Variable block size deduplication is used to reduce the number of bytes 19 | stored by detecting redundant data. Each file is split into a number of 20 | variable length chunks and only chunks that have never been seen before are 21 | compressed and added to the repository. 22 | 23 | Optional data encryption 24 | All data can be protected using 256-bit AES encryption and data integrity 25 | and authenticity is verified using HMAC-SHA256. 26 | 27 | Off-site backups 28 | Attic can store data on any remote host accessible over SSH. This is 29 | most efficient if Attic is also installed on the remote host. 30 | 31 | Backups mountable as filesystems 32 | Backup archives are mountable as userspace filesystems for easy backup 33 | verification and restores. 34 | 35 | What do I need? 
36 | --------------- 37 | Attic requires Python 3.2 or above to work. Besides Python, Attic also requires 38 | msgpack-python and sufficiently recent OpenSSL (>= 1.0.0). 39 | In order to mount archives as filesystems, llfuse is required. 40 | 41 | How do I install it? 42 | -------------------- 43 | :: 44 | 45 | $ pip install Attic 46 | 47 | Where are the docs? 48 | ------------------- 49 | Go to https://attic-backup.org/ for a prebuilt version of the documentation. 50 | You can also build it yourself from the docs folder. 51 | 52 | Where are the tests? 53 | -------------------- 54 | The tests are in the attic/testsuite package. To run the test suite use the 55 | following command:: 56 | 57 | $ fakeroot -u python -m attic.testsuite.run 58 | -------------------------------------------------------------------------------- /attic/__init__.py: -------------------------------------------------------------------------------- 1 | # This is a python package 2 | 3 | from ._version import get_versions 4 | __version__ = get_versions()['version'] 5 | del get_versions 6 | -------------------------------------------------------------------------------- /attic/_chunker.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | /* Cyclic polynomial / buzhash: https://en.wikipedia.org/wiki/Rolling_hash */ 4 | 5 | static uint32_t table_base[] = 6 | { 7 | 0xe7f831ec, 0xf4026465, 0xafb50cae, 0x6d553c7a, 0xd639efe3, 0x19a7b895, 0x9aba5b21, 0x5417d6d4, 8 | 0x35fd2b84, 0xd1f6a159, 0x3f8e323f, 0xb419551c, 0xf444cebf, 0x21dc3b80, 0xde8d1e36, 0x84a32436, 9 | 0xbeb35a9d, 0xa36f24aa, 0xa4e60186, 0x98d18ffe, 0x3f042f9e, 0xdb228bcd, 0x096474b7, 0x5c20c2f7, 10 | 0xf9eec872, 0xe8625275, 0xb9d38f80, 0xd48eb716, 0x22a950b4, 0x3cbaaeaa, 0xc37cddd3, 0x8fea6f6a, 11 | 0x1d55d526, 0x7fd6d3b3, 0xdaa072ee, 0x4345ac40, 0xa077c642, 0x8f2bd45b, 0x28509110, 0x55557613, 12 | 0xffc17311, 0xd961ffef, 0xe532c287, 0xaab95937, 0x46d38365, 0xb065c703, 0xf2d91d0f, 
0x92cd4bb0, 13 | 0x4007c712, 0xf35509dd, 0x505b2f69, 0x557ead81, 0x310f4563, 0xbddc5be8, 0x9760f38c, 0x701e0205, 14 | 0x00157244, 0x14912826, 0xdc4ca32b, 0x67b196de, 0x5db292e8, 0x8c1b406b, 0x01f34075, 0xfa2520f7, 15 | 0x73bc37ab, 0x1e18bc30, 0xfe2c6cb3, 0x20c522d0, 0x5639e3db, 0x942bda35, 0x899af9d1, 0xced44035, 16 | 0x98cc025b, 0x255f5771, 0x70fefa24, 0xe928fa4d, 0x2c030405, 0xb9325590, 0x20cb63bd, 0xa166305d, 17 | 0x80e52c0a, 0xa8fafe2f, 0x1ad13f7d, 0xcfaf3685, 0x6c83a199, 0x7d26718a, 0xde5dfcd9, 0x79cf7355, 18 | 0x8979d7fb, 0xebf8c55e, 0xebe408e4, 0xcd2affba, 0xe483be6e, 0xe239d6de, 0x5dc1e9e0, 0x0473931f, 19 | 0x851b097c, 0xac5db249, 0x09c0f9f2, 0xd8d2f134, 0xe6f38e41, 0xb1c71bf1, 0x52b6e4db, 0x07224424, 20 | 0x6cf73e85, 0x4f25d89c, 0x782a7d74, 0x10a68dcd, 0x3a868189, 0xd570d2dc, 0x69630745, 0x9542ed86, 21 | 0x331cd6b2, 0xa84b5b28, 0x07879c9d, 0x38372f64, 0x7185db11, 0x25ba7c83, 0x01061523, 0xe6792f9f, 22 | 0xe5df07d1, 0x4321b47f, 0x7d2469d8, 0x1a3a4f90, 0x48be29a3, 0x669071af, 0x8ec8dd31, 0x0810bfbf, 23 | 0x813a06b4, 0x68538345, 0x65865ddc, 0x43a71b8e, 0x78619a56, 0x5a34451d, 0x5bdaa3ed, 0x71edc7e9, 24 | 0x17ac9a20, 0x78d10bfa, 0x6c1e7f35, 0xd51839d9, 0x240cbc51, 0x33513cc1, 0xd2b4f795, 0xccaa8186, 25 | 0x0babe682, 0xa33cf164, 0x18c643ea, 0xc1ca105f, 0x9959147a, 0x6d3d94de, 0x0b654fbe, 0xed902ca0, 26 | 0x7d835cb5, 0x99ba1509, 0x6445c922, 0x495e76c2, 0xf07194bc, 0xa1631d7e, 0x677076a5, 0x89fffe35, 27 | 0x1a49bcf3, 0x8e6c948a, 0x0144c917, 0x8d93aea1, 0x16f87ddf, 0xc8f25d49, 0x1fb11297, 0x27e750cd, 28 | 0x2f422da1, 0xdee89a77, 0x1534c643, 0x457b7b8b, 0xaf172f7a, 0x6b9b09d6, 0x33573f7f, 0xf14e15c4, 29 | 0x526467d5, 0xaf488241, 0x87c3ee0d, 0x33be490c, 0x95aa6e52, 0x43ec242e, 0xd77de99b, 0xd018334f, 30 | 0x5b78d407, 0x498eb66b, 0xb1279fa8, 0xb38b0ea6, 0x90718376, 0xe325dee2, 0x8e2f2cba, 0xcaa5bdec, 31 | 0x9d652c56, 0xad68f5cb, 0xa77591af, 0x88e37ee8, 0xf8faa221, 0xfcbbbe47, 0x4f407786, 0xaf393889, 32 | 0xf444a1d9, 0x15ae1a2f, 0x40aa7097, 0x6f9486ac, 0x29d232a3, 
0xe47609e9, 0xe8b631ff, 0xba8565f4, 33 | 0x11288749, 0x46c9a838, 0xeb1b7cd8, 0xf516bbb1, 0xfb74fda0, 0x010996e6, 0x4c994653, 0x1d889512, 34 | 0x53dcd9a3, 0xdd074697, 0x1e78e17c, 0x637c98bf, 0x930bb219, 0xcf7f75b0, 0xcb9355fb, 0x9e623009, 35 | 0xe466d82c, 0x28f968d3, 0xfeb385d9, 0x238e026c, 0xb8ed0560, 0x0c6a027a, 0x3d6fec4b, 0xbb4b2ec2, 36 | 0xe715031c, 0xeded011d, 0xcdc4d3b9, 0xc456fc96, 0xdd0eea20, 0xb3df8ec9, 0x12351993, 0xd9cbb01c, 37 | 0x603147a2, 0xcf37d17d, 0xf7fcd9dc, 0xd8556fa3, 0x104c8131, 0x13152774, 0xb4715811, 0x6a72c2c9, 38 | 0xc5ae37bb, 0xa76ce12a, 0x8150d8f3, 0x2ec29218, 0xa35f0984, 0x48c0647e, 0x0b5ff98c, 0x71893f7b 39 | }; 40 | 41 | #define BARREL_SHIFT(v, shift) ( ((v) << shift) | ((v) >> (32 - shift)) ) 42 | 43 | 44 | static uint32_t * 45 | buzhash_init_table(uint32_t seed) 46 | { 47 | int i; 48 | uint32_t *table = malloc(1024); 49 | for(i = 0; i < 256; i++) 50 | { 51 | table[i] = table_base[i] ^ seed; 52 | } 53 | return table; 54 | } 55 | 56 | static uint32_t 57 | buzhash(const unsigned char *data, size_t len, const uint32_t *h) 58 | { 59 | uint32_t i; 60 | uint32_t sum = 0, imod; 61 | for(i = len - 1; i > 0; i--) 62 | { 63 | imod = i & 0x1f; 64 | sum ^= BARREL_SHIFT(h[*data], imod); 65 | data++; 66 | } 67 | return sum ^ h[*data]; 68 | } 69 | 70 | static uint32_t 71 | buzhash_update(uint32_t sum, unsigned char remove, unsigned char add, size_t len, const uint32_t *h) 72 | { 73 | uint32_t lenmod = len & 0x1f; 74 | return BARREL_SHIFT(sum, 1) ^ BARREL_SHIFT(h[remove], lenmod) ^ h[add]; 75 | } 76 | 77 | typedef struct { 78 | int window_size, chunk_mask, min_size; 79 | size_t buf_size; 80 | uint32_t *table; 81 | uint8_t *data; 82 | PyObject *fd; 83 | int done, eof; 84 | size_t remaining, bytes_read, bytes_yielded, position, last; 85 | } Chunker; 86 | 87 | static Chunker * 88 | chunker_init(int window_size, int chunk_mask, int min_size, uint32_t seed) 89 | { 90 | Chunker *c = calloc(sizeof(Chunker), 1); 91 | c->window_size = window_size; 92 | 
/* Refill the chunker's working buffer from the Python file object c->fd.
 *
 * First compacts the buffer: bytes before c->last are already yielded
 * and are discarded by sliding the live region [last, position+remaining)
 * to the front.  Then reads up to the remaining free space via
 * fd.read(n).  Returns 1 on success or EOF, 0 if the Python read call
 * failed (a Python exception is set in that case). */
static int
chunker_fill(Chunker *c)
{
    size_t n;
    PyObject *data;
    /* drop consumed prefix; keep unconsumed window + lookahead */
    memmove(c->data, c->data + c->last, c->position + c->remaining - c->last);
    c->position -= c->last;
    c->last = 0;
    n = c->buf_size - c->position - c->remaining;  /* free space at tail */
    if(c->eof || n == 0) {
        return 1;
    }
    data = PyObject_CallMethod(c->fd, "read", "i", n);
    if(!data) {
        return 0;  /* propagate the Python exception to the caller */
    }
    n = PyBytes_Size(data);
    /* NOTE(review): assumes fd.read() returns a bytes object; if it does
     * not, PyBytes_Size returns -1 which would wrap around in size_t —
     * confirm callers always pass a binary-mode file-like object. */
    if(n) {
        memcpy(c->data + c->position + c->remaining, PyBytes_AsString(data), n);
        c->remaining += n;
        c->bytes_read += n;
    }
    else {
        c->eof = 1;  /* an empty read() result signals end of file */
    }
    Py_DECREF(data);
    return 1;
}
PyErr_SetNone(PyExc_StopIteration); 176 | else 177 | PyErr_SetString(PyExc_Exception, "chunkifier byte count mismatch"); 178 | return NULL; 179 | } 180 | if(c->remaining <= window_size) { 181 | if(!chunker_fill(c)) { 182 | return NULL; 183 | } 184 | } 185 | if(c->remaining < window_size) { 186 | c->done = 1; 187 | if(c->remaining) { 188 | c->bytes_yielded += c->remaining; 189 | return PyBuffer_FromMemory(c->data + c->position, c->remaining); 190 | } 191 | else { 192 | if(c->bytes_read == c->bytes_yielded) 193 | PyErr_SetNone(PyExc_StopIteration); 194 | else 195 | PyErr_SetString(PyExc_Exception, "chunkifier byte count mismatch"); 196 | return NULL; 197 | } 198 | } 199 | sum = buzhash(c->data + c->position, window_size, c->table); 200 | while(c->remaining > c->window_size && ((sum & chunk_mask) || n < min_size)) { 201 | sum = buzhash_update(sum, c->data[c->position], 202 | c->data[c->position + window_size], 203 | window_size, c->table); 204 | c->position++; 205 | c->remaining--; 206 | n++; 207 | if(c->remaining <= window_size) { 208 | if(!chunker_fill(c)) { 209 | return NULL; 210 | } 211 | } 212 | } 213 | if(c->remaining <= window_size) { 214 | c->position += c->remaining; 215 | c->remaining = 0; 216 | } 217 | old_last = c->last; 218 | c->last = c->position; 219 | n = c->last - old_last; 220 | c->bytes_yielded += n; 221 | return PyBuffer_FromMemory(c->data + old_last, n); 222 | } 223 | -------------------------------------------------------------------------------- /attic/_hashindex.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #if defined(BYTE_ORDER)&&(BYTE_ORDER == BIG_ENDIAN) 12 | #define _le32toh(x) __builtin_bswap32(x) 13 | #define _htole32(x) __builtin_bswap32(x) 14 | #elif defined(BYTE_ORDER)&&(BYTE_ORDER == LITTLE_ENDIAN) 15 | #define _le32toh(x) (x) 16 | #define _htole32(x) (x) 17 | #else 18 | 
#error Unknown byte order 19 | #endif 20 | 21 | typedef struct { 22 | char magic[8]; 23 | int32_t num_entries; 24 | int32_t num_buckets; 25 | int8_t key_size; 26 | int8_t value_size; 27 | } __attribute__((__packed__)) HashHeader; 28 | 29 | typedef struct { 30 | void *buckets; 31 | int num_entries; 32 | int num_buckets; 33 | int key_size; 34 | int value_size; 35 | off_t bucket_size; 36 | int lower_limit; 37 | int upper_limit; 38 | } HashIndex; 39 | 40 | #define MAGIC "ATTICIDX" 41 | #define EMPTY _htole32(0xffffffff) 42 | #define DELETED _htole32(0xfffffffe) 43 | #define MAX_BUCKET_SIZE 512 44 | #define BUCKET_LOWER_LIMIT .25 45 | #define BUCKET_UPPER_LIMIT .90 46 | #define MIN_BUCKETS 1024 47 | #define MAX(x, y) ((x) > (y) ? (x): (y)) 48 | #define BUCKET_ADDR(index, idx) (index->buckets + (idx * index->bucket_size)) 49 | 50 | #define BUCKET_IS_DELETED(index, idx) (*((uint32_t *)(BUCKET_ADDR(index, idx) + index->key_size)) == DELETED) 51 | #define BUCKET_IS_EMPTY(index, idx) (*((uint32_t *)(BUCKET_ADDR(index, idx) + index->key_size)) == EMPTY) 52 | 53 | #define BUCKET_MATCHES_KEY(index, idx, key) (memcmp(key, BUCKET_ADDR(index, idx), index->key_size) == 0) 54 | 55 | #define BUCKET_MARK_DELETED(index, idx) (*((uint32_t *)(BUCKET_ADDR(index, idx) + index->key_size)) = DELETED) 56 | #define BUCKET_MARK_EMPTY(index, idx) (*((uint32_t *)(BUCKET_ADDR(index, idx) + index->key_size)) = EMPTY) 57 | 58 | #define EPRINTF_MSG(msg, ...) fprintf(stderr, "hashindex: " msg "\n", ##__VA_ARGS__) 59 | #define EPRINTF_MSG_PATH(path, msg, ...) fprintf(stderr, "hashindex: %s: " msg "\n", path, ##__VA_ARGS__) 60 | #define EPRINTF(msg, ...) fprintf(stderr, "hashindex: " msg "(%s)\n", ##__VA_ARGS__, strerror(errno)) 61 | #define EPRINTF_PATH(path, msg, ...) 
/* Locate `key` in the table using open addressing with linear probing.
 * Returns the bucket index holding the key, or -1 if the key is absent
 * (an empty bucket or a full wrap-around terminates the search).
 *
 * Side effect: if the key is found after passing one or more DELETED
 * (tombstone) buckets, the entry is relocated into the first tombstone
 * encountered and its old bucket is marked deleted, shortening future
 * probe chains for this key. */
static int
hashindex_lookup(HashIndex *index, const void *key)
{
    int didx = -1;                              /* first tombstone seen, if any */
    int start = hashindex_index(index, key);    /* home bucket for this key */
    int idx = start;
    for(;;) {
        if(BUCKET_IS_EMPTY(index, idx))
        {
            return -1;  /* empty bucket ends the probe chain: key not present */
        }
        if(BUCKET_IS_DELETED(index, idx)) {
            if(didx == -1) {
                didx = idx;  /* remember earliest tombstone for relocation */
            }
        }
        else if(BUCKET_MATCHES_KEY(index, idx, key)) {
            if (didx != -1) {
                /* move the entry forward into the tombstone slot */
                memcpy(BUCKET_ADDR(index, didx), BUCKET_ADDR(index, idx), index->bucket_size);
                BUCKET_MARK_DELETED(index, idx);
                idx = didx;
            }
            return idx;
        }
        idx = (idx + 1) % index->num_buckets;  /* linear probe, wrapping */
        if(idx == start) {
            return -1;  /* scanned every bucket without finding the key */
        }
    }
}
new->lower_limit; 125 | index->upper_limit = new->upper_limit; 126 | free(new); 127 | return 1; 128 | } 129 | 130 | /* Public API */ 131 | static HashIndex * 132 | hashindex_read(const char *path) 133 | { 134 | FILE *fd; 135 | off_t length, buckets_length; 136 | HashHeader header; 137 | HashIndex *index = NULL; 138 | 139 | if((fd = fopen(path, "r")) == NULL) { 140 | EPRINTF_PATH(path, "fopen failed"); 141 | return NULL; 142 | } 143 | if(fread(&header, 1, sizeof(HashHeader), fd) != sizeof(HashHeader)) { 144 | if(ferror(fd)) { 145 | EPRINTF_PATH(path, "fread failed"); 146 | } 147 | else { 148 | EPRINTF_MSG_PATH(path, "failed to read %ld bytes", sizeof(HashHeader)); 149 | } 150 | goto fail; 151 | } 152 | if(fseek(fd, 0, SEEK_END) < 0) { 153 | EPRINTF_PATH(path, "fseek failed"); 154 | goto fail; 155 | } 156 | if((length = ftell(fd)) < 0) { 157 | EPRINTF_PATH(path, "ftell failed"); 158 | goto fail; 159 | } 160 | if(fseek(fd, sizeof(HashHeader), SEEK_SET) < 0) { 161 | EPRINTF_PATH(path, "fseek failed"); 162 | goto fail; 163 | } 164 | if(memcmp(header.magic, MAGIC, 8)) { 165 | EPRINTF_MSG_PATH(path, "Unknown file header"); 166 | goto fail; 167 | } 168 | buckets_length = (off_t)_le32toh(header.num_buckets) * (header.key_size + header.value_size); 169 | if(length != sizeof(HashHeader) + buckets_length) { 170 | EPRINTF_MSG_PATH(path, "Incorrect file length"); 171 | goto fail; 172 | } 173 | if(!(index = malloc(sizeof(HashIndex)))) { 174 | EPRINTF_PATH(path, "malloc failed"); 175 | goto fail; 176 | } 177 | if(!(index->buckets = malloc(buckets_length))) { 178 | EPRINTF_PATH(path, "malloc failed"); 179 | free(index); 180 | index = NULL; 181 | goto fail; 182 | } 183 | if(fread(index->buckets, 1, buckets_length, fd) != buckets_length) { 184 | if(ferror(fd)) { 185 | EPRINTF_PATH(path, "fread failed"); 186 | } 187 | else { 188 | EPRINTF_MSG_PATH(path, "failed to read %ld bytes", length); 189 | } 190 | free(index->buckets); 191 | free(index); 192 | index = NULL; 193 | goto fail; 194 
| } 195 | index->num_entries = _le32toh(header.num_entries); 196 | index->num_buckets = _le32toh(header.num_buckets); 197 | index->key_size = header.key_size; 198 | index->value_size = header.value_size; 199 | index->bucket_size = index->key_size + index->value_size; 200 | index->lower_limit = index->num_buckets > MIN_BUCKETS ? ((int)(index->num_buckets * BUCKET_LOWER_LIMIT)) : 0; 201 | index->upper_limit = (int)(index->num_buckets * BUCKET_UPPER_LIMIT); 202 | fail: 203 | if(fclose(fd) < 0) { 204 | EPRINTF_PATH(path, "fclose failed"); 205 | } 206 | return index; 207 | } 208 | 209 | static HashIndex * 210 | hashindex_init(int capacity, int key_size, int value_size) 211 | { 212 | off_t buckets_length; 213 | HashIndex *index; 214 | int i; 215 | capacity = MAX(MIN_BUCKETS, capacity); 216 | 217 | if(!(index = malloc(sizeof(HashIndex)))) { 218 | EPRINTF("malloc failed"); 219 | return NULL; 220 | } 221 | buckets_length = (off_t)capacity * (key_size + value_size); 222 | if(!(index->buckets = calloc(buckets_length, 1))) { 223 | EPRINTF("malloc failed"); 224 | free(index); 225 | return NULL; 226 | } 227 | index->num_entries = 0; 228 | index->key_size = key_size; 229 | index->value_size = value_size; 230 | index->num_buckets = capacity; 231 | index->bucket_size = index->key_size + index->value_size; 232 | index->lower_limit = index->num_buckets > MIN_BUCKETS ? 
((int)(index->num_buckets * BUCKET_LOWER_LIMIT)) : 0; 233 | index->upper_limit = (int)(index->num_buckets * BUCKET_UPPER_LIMIT); 234 | for(i = 0; i < capacity; i++) { 235 | BUCKET_MARK_EMPTY(index, i); 236 | } 237 | return index; 238 | } 239 | 240 | static void 241 | hashindex_free(HashIndex *index) 242 | { 243 | free(index->buckets); 244 | free(index); 245 | } 246 | 247 | static int 248 | hashindex_write(HashIndex *index, const char *path) 249 | { 250 | off_t buckets_length = (off_t)index->num_buckets * index->bucket_size; 251 | FILE *fd; 252 | HashHeader header = { 253 | .magic = MAGIC, 254 | .num_entries = _htole32(index->num_entries), 255 | .num_buckets = _htole32(index->num_buckets), 256 | .key_size = index->key_size, 257 | .value_size = index->value_size 258 | }; 259 | int ret = 1; 260 | 261 | if((fd = fopen(path, "w")) == NULL) { 262 | EPRINTF_PATH(path, "open failed"); 263 | return 0; 264 | } 265 | if(fwrite(&header, 1, sizeof(header), fd) != sizeof(header)) { 266 | EPRINTF_PATH(path, "fwrite failed"); 267 | ret = 0; 268 | } 269 | if(fwrite(index->buckets, 1, buckets_length, fd) != buckets_length) { 270 | EPRINTF_PATH(path, "fwrite failed"); 271 | ret = 0; 272 | } 273 | if(fclose(fd) < 0) { 274 | EPRINTF_PATH(path, "fclose failed"); 275 | } 276 | return ret; 277 | } 278 | 279 | static const void * 280 | hashindex_get(HashIndex *index, const void *key) 281 | { 282 | int idx = hashindex_lookup(index, key); 283 | if(idx < 0) { 284 | return NULL; 285 | } 286 | return BUCKET_ADDR(index, idx) + index->key_size; 287 | } 288 | 289 | static int 290 | hashindex_set(HashIndex *index, const void *key, const void *value) 291 | { 292 | int idx = hashindex_lookup(index, key); 293 | uint8_t *ptr; 294 | if(idx < 0) 295 | { 296 | if(index->num_entries > index->upper_limit) { 297 | if(!hashindex_resize(index, index->num_buckets * 2)) { 298 | return 0; 299 | } 300 | } 301 | idx = hashindex_index(index, key); 302 | while(!BUCKET_IS_EMPTY(index, idx) && !BUCKET_IS_DELETED(index, 
idx)) { 303 | idx = (idx + 1) % index->num_buckets; 304 | } 305 | ptr = BUCKET_ADDR(index, idx); 306 | memcpy(ptr, key, index->key_size); 307 | memcpy(ptr + index->key_size, value, index->value_size); 308 | index->num_entries += 1; 309 | } 310 | else 311 | { 312 | memcpy(BUCKET_ADDR(index, idx) + index->key_size, value, index->value_size); 313 | } 314 | return 1; 315 | } 316 | 317 | static int 318 | hashindex_delete(HashIndex *index, const void *key) 319 | { 320 | int idx = hashindex_lookup(index, key); 321 | if (idx < 0) { 322 | return 1; 323 | } 324 | BUCKET_MARK_DELETED(index, idx); 325 | index->num_entries -= 1; 326 | if(index->num_entries < index->lower_limit) { 327 | if(!hashindex_resize(index, index->num_buckets / 2)) { 328 | return 0; 329 | } 330 | } 331 | return 1; 332 | } 333 | 334 | static void * 335 | hashindex_next_key(HashIndex *index, const void *key) 336 | { 337 | int idx = 0; 338 | if(key) { 339 | idx = 1 + (key - index->buckets) / index->bucket_size; 340 | } 341 | if (idx == index->num_buckets) { 342 | return NULL; 343 | } 344 | while(BUCKET_IS_EMPTY(index, idx) || BUCKET_IS_DELETED(index, idx)) { 345 | idx ++; 346 | if (idx == index->num_buckets) { 347 | return NULL; 348 | } 349 | } 350 | return BUCKET_ADDR(index, idx); 351 | } 352 | 353 | static int 354 | hashindex_get_size(HashIndex *index) 355 | { 356 | return index->num_entries; 357 | } 358 | 359 | static void 360 | hashindex_summarize(HashIndex *index, long long *total_size, long long *total_csize, long long *total_unique_size, long long *total_unique_csize) 361 | { 362 | int64_t size = 0, csize = 0, unique_size = 0, unique_csize = 0; 363 | const int32_t *values; 364 | void *key = NULL; 365 | 366 | while((key = hashindex_next_key(index, key))) { 367 | values = key + 32; 368 | unique_size += values[1]; 369 | unique_csize += values[2]; 370 | size += values[0] * values[1]; 371 | csize += values[0] * values[2]; 372 | } 373 | *total_size = size; 374 | *total_csize = csize; 375 | 
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)

# these strings will be replaced by git during git-archive
git_refnames = " (HEAD -> master)"
git_full = "2b348104f668836f9e00103681e3bc85cb49ecae"

import os.path
import re
import subprocess
import sys


def run_command(args, cwd=None, verbose=False):
    """Run *args* and return its stripped stdout, or None on any failure."""
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        e = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(e)
        return None
    stdout = p.communicate()[0].strip()
    # BUGFIX: was `sys.version >= '3'`, a *string* comparison that is fragile
    # (a hypothetical "10.x" sorts below "3"); compare the numeric tuple.
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout


def get_expanded_variables(versionfile_source):
    """Read git_refnames/git_full out of *versionfile_source* with a regexp.

    The code embedded in _version.py can just fetch the value of these
    variables directly.  When used from setup.py we don't want to import
    _version.py, so this helper parses the file instead.  Not used from
    _version.py itself.
    """
    variables = {}
    try:
        for line in open(versionfile_source, "r").readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables


def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """Derive {"version", "full"} from git-archive expanded strings.

    Returns {} when the variables are still unexpanded, i.e. we are not
    inside an unpacked git-archive tarball.
    """
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # Assume all version tags have a digit. git's %d expansion behaves like
    # git log --decorate=short and strips out the refs/heads/ and refs/tags/
    # prefixes that would let us distinguish between branches and tags. By
    # ignoring refnames without digits, we filter out many common branch
    # names like "release" and "stabilization", as well as "HEAD" and
    # "master".
    for ref in list(refs):
        if not re.search(r'\d', ref):
            if verbose:
                print("discarding '%s', no digits" % ref)
            refs.discard(ref)
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(refs)))
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full": variables["full"].strip()}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": variables["full"].strip(),
            "full": variables["full"].strip()}


def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """Ask `git describe` for a version; {} if we're not in a git checkout.

    This runs 'git' from the root of the source tree.  That either means
    someone ran a setup.py command (and this code is in versioneer.py, so
    IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    the source tree), or someone ran a project-specific entry point (and
    this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    containing directory is somewhere deeper in the source tree).  This only
    gets called if the git-archive 'subst' variables were *not* expanded,
    and _version.py hasn't already been rewritten with a short version
    string, meaning we're inside a checked out source tree.
    """
    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {}  # not always correct

    # versionfile_source is the relative path from the top of the source
    # tree (where the .git directory might live) to this file. Invert this
    # to find the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}

    GIT = "git"
    if sys.platform == "win32":
        GIT = "git.cmd"
    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}


def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    """Guess the version from the name of the directory containing the tree.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.
    """
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {}  # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)

    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        # CONSISTENCY FIX: the sibling versions_from_* helpers signal
        # "no answer" with {}; returning None here only worked because the
        # caller tests truthiness.
        return {}
    return {"version": dirname[len(parentdir_prefix):], "full": ""}


tag_prefix = ""
parentdir_prefix = "Attic-"
versionfile_source = "attic/_version.py"


def get_versions(default=None, verbose=False):
    """Try each version-discovery strategy in turn; fall back to *default*.

    BUGFIX: the default used to be a mutable dict literal shared between
    calls; use None as a sentinel instead (same effective default).
    """
    if default is None:
        default = {"version": "unknown", "full": ""}
    variables = {"refnames": git_refnames, "full": git_full}
    ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
    if not ver:
        ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if not ver:
        ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
                                      verbose)
    if not ver:
        ver = default
    return ver
RepositoryAccessAborted(Error): 25 | """Repository access aborted""" 26 | 27 | class EncryptionMethodMismatch(Error): 28 | """Repository encryption method changed since last acccess, refusing to continue 29 | """ 30 | 31 | def __init__(self, repository, key, manifest, path=None, sync=True, warn_if_unencrypted=True): 32 | self.lock = None 33 | self.timestamp = None 34 | self.txn_active = False 35 | self.repository = repository 36 | self.key = key 37 | self.manifest = manifest 38 | self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii')) 39 | # Warn user before sending data to a never seen before unencrypted repository 40 | if not os.path.exists(self.path): 41 | if warn_if_unencrypted and isinstance(key, PlaintextKey): 42 | if not self._confirm('Warning: Attempting to access a previously unknown unencrypted repository', 43 | 'ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK'): 44 | raise self.CacheInitAbortedError() 45 | self.create() 46 | self.open() 47 | # Warn user before sending data to a relocated repository 48 | if self.previous_location and self.previous_location != repository._location.canonical_path(): 49 | msg = 'Warning: The repository at location {} was previously located at {}'.format(repository._location.canonical_path(), self.previous_location) 50 | if not self._confirm(msg, 'ATTIC_RELOCATED_REPO_ACCESS_IS_OK'): 51 | raise self.RepositoryAccessAborted() 52 | 53 | if sync and self.manifest.id != self.manifest_id: 54 | # If repository is older than the cache something fishy is going on 55 | if self.timestamp and self.timestamp > manifest.timestamp: 56 | raise self.RepositoryReplay() 57 | # Make sure an encrypted repository has not been swapped for an unencrypted repository 58 | if self.key_type is not None and self.key_type != str(key.TYPE): 59 | raise self.EncryptionMethodMismatch() 60 | self.sync() 61 | self.commit() 62 | 63 | def __del__(self): 64 | self.close() 65 | 66 | def _confirm(self, message, env_var_override=None): 
67 | print(message, file=sys.stderr) 68 | if env_var_override and os.environ.get(env_var_override): 69 | print("Yes (From {})".format(env_var_override)) 70 | return True 71 | if not sys.stdin.isatty(): 72 | return False 73 | try: 74 | answer = input('Do you want to continue? [yN] ') 75 | except EOFError: 76 | return False 77 | return answer and answer in 'Yy' 78 | 79 | def create(self): 80 | """Create a new empty cache at `path` 81 | """ 82 | os.makedirs(self.path) 83 | with open(os.path.join(self.path, 'README'), 'w') as fd: 84 | fd.write('This is an Attic cache') 85 | config = RawConfigParser() 86 | config.add_section('cache') 87 | config.set('cache', 'version', '1') 88 | config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii')) 89 | config.set('cache', 'manifest', '') 90 | with open(os.path.join(self.path, 'config'), 'w') as fd: 91 | config.write(fd) 92 | ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8')) 93 | with open(os.path.join(self.path, 'files'), 'w') as fd: 94 | pass # empty file 95 | 96 | def _do_open(self): 97 | self.config = RawConfigParser() 98 | self.config.read(os.path.join(self.path, 'config')) 99 | if self.config.getint('cache', 'version') != 1: 100 | raise Exception('%s Does not look like an Attic cache') 101 | self.id = self.config.get('cache', 'repository') 102 | self.manifest_id = unhexlify(self.config.get('cache', 'manifest')) 103 | self.timestamp = self.config.get('cache', 'timestamp', fallback=None) 104 | self.key_type = self.config.get('cache', 'key_type', fallback=None) 105 | self.previous_location = self.config.get('cache', 'previous_location', fallback=None) 106 | self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8')) 107 | self.files = None 108 | 109 | def open(self): 110 | if not os.path.isdir(self.path): 111 | raise Exception('%s Does not look like an Attic cache' % self.path) 112 | self.lock = UpgradableLock(os.path.join(self.path, 'config'), exclusive=True) 113 | 
self.rollback() 114 | 115 | def close(self): 116 | if self.lock: 117 | self.lock.release() 118 | self.lock = None 119 | 120 | def _read_files(self): 121 | self.files = {} 122 | self._newest_mtime = 0 123 | with open(os.path.join(self.path, 'files'), 'rb') as fd: 124 | u = msgpack.Unpacker(use_list=True) 125 | while True: 126 | data = fd.read(64 * 1024) 127 | if not data: 128 | break 129 | u.feed(data) 130 | for path_hash, item in u: 131 | item[0] += 1 132 | self.files[path_hash] = msgpack.packb(item) 133 | 134 | def begin_txn(self): 135 | # Initialize transaction snapshot 136 | txn_dir = os.path.join(self.path, 'txn.tmp') 137 | os.mkdir(txn_dir) 138 | shutil.copy(os.path.join(self.path, 'config'), txn_dir) 139 | shutil.copy(os.path.join(self.path, 'chunks'), txn_dir) 140 | shutil.copy(os.path.join(self.path, 'files'), txn_dir) 141 | os.rename(os.path.join(self.path, 'txn.tmp'), 142 | os.path.join(self.path, 'txn.active')) 143 | self.txn_active = True 144 | 145 | def commit(self): 146 | """Commit transaction 147 | """ 148 | if not self.txn_active: 149 | return 150 | if self.files is not None: 151 | with open(os.path.join(self.path, 'files'), 'wb') as fd: 152 | for path_hash, item in self.files.items(): 153 | # Discard cached files with the newest mtime to avoid 154 | # issues with filesystem snapshots and mtime precision 155 | item = msgpack.unpackb(item) 156 | if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime: 157 | msgpack.pack((path_hash, item), fd) 158 | self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii')) 159 | self.config.set('cache', 'timestamp', self.manifest.timestamp) 160 | self.config.set('cache', 'key_type', str(self.key.TYPE)) 161 | self.config.set('cache', 'previous_location', self.repository._location.canonical_path()) 162 | with open(os.path.join(self.path, 'config'), 'w') as fd: 163 | self.config.write(fd) 164 | self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8')) 165 | 
os.rename(os.path.join(self.path, 'txn.active'), 166 | os.path.join(self.path, 'txn.tmp')) 167 | shutil.rmtree(os.path.join(self.path, 'txn.tmp')) 168 | self.txn_active = False 169 | 170 | def rollback(self): 171 | """Roll back partial and aborted transactions 172 | """ 173 | # Remove partial transaction 174 | if os.path.exists(os.path.join(self.path, 'txn.tmp')): 175 | shutil.rmtree(os.path.join(self.path, 'txn.tmp')) 176 | # Roll back active transaction 177 | txn_dir = os.path.join(self.path, 'txn.active') 178 | if os.path.exists(txn_dir): 179 | shutil.copy(os.path.join(txn_dir, 'config'), self.path) 180 | shutil.copy(os.path.join(txn_dir, 'chunks'), self.path) 181 | shutil.copy(os.path.join(txn_dir, 'files'), self.path) 182 | os.rename(txn_dir, os.path.join(self.path, 'txn.tmp')) 183 | if os.path.exists(os.path.join(self.path, 'txn.tmp')): 184 | shutil.rmtree(os.path.join(self.path, 'txn.tmp')) 185 | self.txn_active = False 186 | self._do_open() 187 | 188 | def sync(self): 189 | """Initializes cache by fetching and reading all archive indicies 190 | """ 191 | def add(id, size, csize): 192 | try: 193 | count, size, csize = self.chunks[id] 194 | self.chunks[id] = count + 1, size, csize 195 | except KeyError: 196 | self.chunks[id] = 1, size, csize 197 | self.begin_txn() 198 | print('Initializing cache...') 199 | self.chunks.clear() 200 | unpacker = msgpack.Unpacker() 201 | repository = cache_if_remote(self.repository) 202 | for name, info in self.manifest.archives.items(): 203 | archive_id = info[b'id'] 204 | cdata = repository.get(archive_id) 205 | data = self.key.decrypt(archive_id, cdata) 206 | add(archive_id, len(data), len(cdata)) 207 | archive = msgpack.unpackb(data) 208 | if archive[b'version'] != 1: 209 | raise Exception('Unknown archive metadata version') 210 | decode_dict(archive, (b'name',)) 211 | print('Analyzing archive:', archive[b'name']) 212 | for key, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])): 213 | data = 
self.key.decrypt(key, chunk) 214 | add(key, len(data), len(chunk)) 215 | unpacker.feed(data) 216 | for item in unpacker: 217 | if b'chunks' in item: 218 | for chunk_id, size, csize in item[b'chunks']: 219 | add(chunk_id, size, csize) 220 | 221 | def add_chunk(self, id, data, stats): 222 | if not self.txn_active: 223 | self.begin_txn() 224 | if self.seen_chunk(id): 225 | return self.chunk_incref(id, stats) 226 | size = len(data) 227 | data = self.key.encrypt(data) 228 | csize = len(data) 229 | self.repository.put(id, data, wait=False) 230 | self.chunks[id] = (1, size, csize) 231 | stats.update(size, csize, True) 232 | return id, size, csize 233 | 234 | def seen_chunk(self, id): 235 | return self.chunks.get(id, (0, 0, 0))[0] 236 | 237 | def chunk_incref(self, id, stats): 238 | if not self.txn_active: 239 | self.begin_txn() 240 | count, size, csize = self.chunks[id] 241 | self.chunks[id] = (count + 1, size, csize) 242 | stats.update(size, csize, False) 243 | return id, size, csize 244 | 245 | def chunk_decref(self, id, stats): 246 | if not self.txn_active: 247 | self.begin_txn() 248 | count, size, csize = self.chunks[id] 249 | if count == 1: 250 | del self.chunks[id] 251 | self.repository.delete(id, wait=False) 252 | stats.update(-size, -csize, True) 253 | else: 254 | self.chunks[id] = (count - 1, size, csize) 255 | stats.update(-size, -csize, False) 256 | 257 | def file_known_and_unchanged(self, path_hash, st): 258 | if self.files is None: 259 | self._read_files() 260 | entry = self.files.get(path_hash) 261 | if not entry: 262 | return None 263 | entry = msgpack.unpackb(entry) 264 | if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino: 265 | # reset entry age 266 | entry[0] = 0 267 | self.files[path_hash] = msgpack.packb(entry) 268 | return entry[4] 269 | else: 270 | return None 271 | 272 | def memorize_file(self, path_hash, st, ids): 273 | # Entry: Age, inode, size, mtime, chunk ids 274 | mtime_ns = st_mtime_ns(st) 275 | 
self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids)) 276 | self._newest_mtime = max(self._newest_mtime, mtime_ns) 277 | -------------------------------------------------------------------------------- /attic/chunker.pyx: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | API_VERSION = 2 4 | 5 | from libc.stdlib cimport free 6 | 7 | cdef extern from "_chunker.c": 8 | ctypedef int uint32_t 9 | ctypedef struct _Chunker "Chunker": 10 | pass 11 | _Chunker *chunker_init(int window_size, int chunk_mask, int min_size, uint32_t seed) 12 | void chunker_set_fd(_Chunker *chunker, object fd) 13 | void chunker_free(_Chunker *chunker) 14 | object chunker_process(_Chunker *chunker) 15 | uint32_t *buzhash_init_table(uint32_t seed) 16 | uint32_t c_buzhash "buzhash"(unsigned char *data, size_t len, uint32_t *h) 17 | uint32_t c_buzhash_update "buzhash_update"(uint32_t sum, unsigned char remove, unsigned char add, size_t len, uint32_t *h) 18 | 19 | 20 | cdef class Chunker: 21 | cdef _Chunker *chunker 22 | 23 | def __cinit__(self, window_size, chunk_mask, min_size, seed): 24 | self.chunker = chunker_init(window_size, chunk_mask, min_size, seed & 0xffffffff) 25 | 26 | def chunkify(self, fd): 27 | chunker_set_fd(self.chunker, fd) 28 | return self 29 | 30 | def __dealloc__(self): 31 | if self.chunker: 32 | chunker_free(self.chunker) 33 | 34 | def __iter__(self): 35 | return self 36 | 37 | def __next__(self): 38 | return chunker_process(self.chunker) 39 | 40 | 41 | def buzhash(unsigned char *data, unsigned long seed): 42 | cdef uint32_t *table 43 | cdef uint32_t sum 44 | table = buzhash_init_table(seed & 0xffffffff) 45 | sum = c_buzhash(data, len(data), table) 46 | free(table) 47 | return sum 48 | 49 | 50 | def buzhash_update(uint32_t sum, unsigned char remove, unsigned char add, size_t len, unsigned long seed): 51 | cdef uint32_t *table 52 | table = buzhash_init_table(seed & 
"""A thin OpenSSL wrapper

This could be replaced by PyCrypto or something similar when the performance
of their PBKDF2 implementation is comparable to the OpenSSL version.
"""
from libc.stdlib cimport malloc, free

API_VERSION = 2

cdef extern from "openssl/rand.h":
    int RAND_bytes(unsigned char *buf, int num)


cdef extern from "openssl/evp.h":
    ctypedef struct EVP_MD:
        pass
    ctypedef struct EVP_CIPHER:
        pass
    ctypedef struct EVP_CIPHER_CTX:
        unsigned char *iv
        pass
    ctypedef struct ENGINE:
        pass
    const EVP_MD *EVP_sha256()
    const EVP_CIPHER *EVP_aes_256_ctr()
    void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *a)
    void EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *a)

    int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *impl,
                           const unsigned char *key, const unsigned char *iv)
    int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          int *outl, const unsigned char *in_, int inl)

    int PKCS5_PBKDF2_HMAC(const char *password, int passwordlen,
                          const unsigned char *salt, int saltlen, int iter,
                          const EVP_MD *digest,
                          int keylen, unsigned char *out)

import struct

_int = struct.Struct('>I')
_long = struct.Struct('>Q')


def bytes_to_int(x, offset=0):
    """Read a big-endian unsigned 32 bit integer from *x* at *offset*."""
    return _int.unpack_from(x, offset)[0]


def bytes_to_long(x, offset=0):
    """Read a big-endian unsigned 64 bit integer from *x* at *offset*."""
    return _long.unpack_from(x, offset)[0]


def long_to_bytes(x):
    """Encode *x* as 8 big-endian bytes."""
    return _long.pack(x)


def num_aes_blocks(length):
    """Return the number of AES blocks required to encrypt/decrypt *length* bytes of data
    """
    return (length + 15) // 16


def pbkdf2_sha256(password, salt, iterations, size):
    """Password based key derivation function 2 (RFC2898)
    """
    cdef unsigned char *derived = malloc(size)
    if not derived:
        raise MemoryError
    try:
        ok = PKCS5_PBKDF2_HMAC(password, len(password), salt, len(salt), iterations, EVP_sha256(), size, derived)
        if not ok:
            raise Exception('PKCS5_PBKDF2_HMAC failed')
        return derived[:size]
    finally:
        free(derived)


def get_random_bytes(n):
    """Return n cryptographically strong pseudo-random bytes
    """
    cdef unsigned char *scratch = malloc(n)
    if not scratch:
        raise MemoryError
    try:
        # RAND_bytes returns 1 on success
        if RAND_bytes(scratch, n) < 1:
            raise Exception('RAND_bytes failed')
        return scratch[:n]
    finally:
        free(scratch)


cdef class AES:
    """A thin wrapper around the OpenSSL EVP cipher API

    AES-256 in CTR mode; since CTR is symmetric, decrypt is the same
    operation as encrypt.
    """
    cdef EVP_CIPHER_CTX ctx

    def __cinit__(self, key, iv=None):
        EVP_CIPHER_CTX_init(&self.ctx)
        if not EVP_EncryptInit_ex(&self.ctx, EVP_aes_256_ctr(), NULL, NULL, NULL):
            raise Exception('EVP_EncryptInit_ex failed')
        self.reset(key, iv)

    def __dealloc__(self):
        EVP_CIPHER_CTX_cleanup(&self.ctx)

    def reset(self, key=None, iv=None):
        """Re-key and/or re-seed the counter; None leaves a value unchanged."""
        cdef const unsigned char *c_key = NULL
        cdef const unsigned char *c_iv = NULL
        if key:
            c_key = key
        if iv:
            c_iv = iv
        if not EVP_EncryptInit_ex(&self.ctx, NULL, NULL, c_key, c_iv):
            raise Exception('EVP_EncryptInit_ex failed')

    @property
    def iv(self):
        return self.ctx.iv[:16]

    def encrypt(self, data):
        cdef int inl = len(data)
        cdef int outl
        # CTR mode is a stream cipher: output length equals input length
        cdef unsigned char *out = malloc(inl)
        if not out:
            raise MemoryError
        try:
            if not EVP_EncryptUpdate(&self.ctx, out, &outl, data, inl):
                raise Exception('EVP_EncryptUpdate failed')
            return out[:inl]
        finally:
            free(out)
    decrypt = encrypt
from collections import defaultdict
import errno
import io
import llfuse
import msgpack
import os
import stat
import tempfile
import time
from attic.archive import Archive
from attic.helpers import daemonize
from attic.remote import cache_if_remote

# Does this version of llfuse support ns precision?
have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')


class ItemCache:
    """Spill archive item metadata into a temp file, addressed by fake inode.

    Inodes handed out by add() start at self.offset so they never collide
    with the low inode numbers allocated for directory entries.
    """
    def __init__(self):
        self.fd = tempfile.TemporaryFile()
        self.offset = 1000000

    def add(self, item):
        pos = self.fd.seek(0, io.SEEK_END)
        self.fd.write(msgpack.packb(item))
        return pos + self.offset

    def get(self, inode):
        self.fd.seek(inode - self.offset, io.SEEK_SET)
        return next(msgpack.Unpacker(self.fd))


class AtticOperations(llfuse.Operations):
    """Export Attic archive as a fuse filesystem
    """
    def __init__(self, key, repository, manifest, archive):
        super().__init__()
        self._inode_count = 0
        self.key = key
        self.repository = cache_if_remote(repository)
        self.items = {}
        self.parent = {}
        self.contents = defaultdict(dict)
        self.default_dir = {b'mode': 0o40755, b'mtime': int(time.time() * 1e9), b'uid': os.getuid(), b'gid': os.getgid()}
        self.pending_archives = {}
        self.cache = ItemCache()
        if archive:
            self.process_archive(archive)
        else:
            # Create root inode
            self.parent[1] = self.allocate_inode()
            self.items[1] = self.default_dir
            for archive_name in manifest.archives:
                # Create archive placeholder inode; real contents are loaded
                # lazily on first lookup/opendir (see _load_pending_archive)
                archive_inode = self.allocate_inode()
                self.items[archive_inode] = self.default_dir
                self.parent[archive_inode] = 1
                self.contents[1][os.fsencode(archive_name)] = archive_inode
                self.pending_archives[archive_inode] = Archive(repository, key, manifest, archive_name)

    def process_archive(self, archive, prefix=None):
        """Build fuse inode hierarchy from archive metadata

        BUGFIX: *prefix* used to default to a mutable list literal shared
        across calls; use None and substitute a fresh list instead.
        """
        if prefix is None:
            prefix = []
        unpacker = msgpack.Unpacker()
        for key, chunk in zip(archive.metadata[b'items'], self.repository.get_many(archive.metadata[b'items'])):
            data = self.key.decrypt(key, chunk)
            unpacker.feed(data)
            for item in unpacker:
                segments = prefix + os.fsencode(os.path.normpath(item[b'path'])).split(b'/')
                del item[b'path']
                num_segments = len(segments)
                parent = 1
                for i, segment in enumerate(segments, 1):
                    # Insert a default root inode if needed
                    if self._inode_count == 0 and segment:
                        archive_inode = self.allocate_inode()
                        self.items[archive_inode] = self.default_dir
                        self.parent[archive_inode] = parent
                    # Leaf segment?
                    if i == num_segments:
                        if b'source' in item and stat.S_ISREG(item[b'mode']):
                            # hardlink: point at the existing item, bump nlink
                            inode = self._find_inode(item[b'source'], prefix)
                            item = self.cache.get(inode)
                            item[b'nlink'] = item.get(b'nlink', 1) + 1
                            self.items[inode] = item
                        else:
                            inode = self.cache.add(item)
                        self.parent[inode] = parent
                        if segment:
                            self.contents[parent][segment] = inode
                    elif segment in self.contents[parent]:
                        parent = self.contents[parent][segment]
                    else:
                        # intermediate directory not seen before
                        inode = self.allocate_inode()
                        self.items[inode] = self.default_dir
                        self.parent[inode] = parent
                        if segment:
                            self.contents[parent][segment] = inode
                        parent = inode

    def allocate_inode(self):
        self._inode_count += 1
        return self._inode_count

    def statfs(self):
        # archive filesystems are read-only: report zero capacity everywhere
        stat_ = llfuse.StatvfsData()
        stat_.f_bsize = 512
        stat_.f_frsize = 512
        stat_.f_blocks = 0
        stat_.f_bfree = 0
        stat_.f_bavail = 0
        stat_.f_files = 0
        stat_.f_ffree = 0
        stat_.f_favail = 0
        return stat_

    def get_item(self, inode):
        try:
            return self.items[inode]
        except KeyError:
            return self.cache.get(inode)

    def _find_inode(self, path, prefix=None):
        """Walk *path* (optionally under *prefix*) down the contents tree.

        BUGFIX: same mutable-default-argument fix as process_archive.
        """
        if prefix is None:
            prefix = []
        segments = prefix + os.fsencode(os.path.normpath(path)).split(b'/')
        inode = 1
        for segment in segments:
            inode = self.contents[inode][segment]
        return inode

    def getattr(self, inode):
        item = self.get_item(inode)
        size = 0
        try:
            size = sum(size for _, size, _ in item[b'chunks'])
        except KeyError:
            pass
        entry = llfuse.EntryAttributes()
        entry.st_ino = inode
        entry.generation = 0
        entry.entry_timeout = 300
        entry.attr_timeout = 300
        entry.st_mode = item[b'mode']
        entry.st_nlink = item.get(b'nlink', 1)
        entry.st_uid = item[b'uid']
        entry.st_gid = item[b'gid']
        entry.st_rdev = item.get(b'rdev', 0)
        entry.st_size = size
        entry.st_blksize = 512
        entry.st_blocks = 1
        # archives store a single ns timestamp; report it for a/m/ctime alike
        if have_fuse_mtime_ns:
            entry.st_atime_ns = item[b'mtime']
            entry.st_mtime_ns = item[b'mtime']
            entry.st_ctime_ns = item[b'mtime']
        else:
            entry.st_atime = item[b'mtime'] / 1e9
            entry.st_mtime = item[b'mtime'] / 1e9
            entry.st_ctime = item[b'mtime'] / 1e9
        return entry

    def listxattr(self, inode):
        item = self.get_item(inode)
        return item.get(b'xattrs', {}).keys()

    def getxattr(self, inode, name):
        item = self.get_item(inode)
        try:
            return item.get(b'xattrs', {})[name]
        except KeyError:
            raise llfuse.FUSEError(errno.ENODATA)

    def _load_pending_archive(self, inode):
        # Check if this is an archive we need to load
        archive = self.pending_archives.pop(inode, None)
        if archive:
            self.process_archive(archive, [os.fsencode(archive.name)])
self._load_pending_archive(parent_inode) 179 | if name == b'.': 180 | inode = parent_inode 181 | elif name == b'..': 182 | inode = self.parent[parent_inode] 183 | else: 184 | inode = self.contents[parent_inode].get(name) 185 | if not inode: 186 | raise llfuse.FUSEError(errno.ENOENT) 187 | return self.getattr(inode) 188 | 189 | def open(self, inode, flags): 190 | return inode 191 | 192 | def opendir(self, inode): 193 | self._load_pending_archive(inode) 194 | return inode 195 | 196 | def read(self, fh, offset, size): 197 | parts = [] 198 | item = self.get_item(fh) 199 | for id, s, csize in item[b'chunks']: 200 | if s < offset: 201 | offset -= s 202 | continue 203 | n = min(size, s - offset) 204 | chunk = self.key.decrypt(id, self.repository.get(id)) 205 | parts.append(chunk[offset:offset+n]) 206 | offset = 0 207 | size -= n 208 | if not size: 209 | break 210 | return b''.join(parts) 211 | 212 | def readdir(self, fh, off): 213 | entries = [(b'.', fh), (b'..', self.parent[fh])] 214 | entries.extend(self.contents[fh].items()) 215 | for i, (name, inode) in enumerate(entries[off:], off): 216 | yield name, self.getattr(inode), i + 1 217 | 218 | def readlink(self, inode): 219 | item = self.get_item(inode) 220 | return os.fsencode(item[b'source']) 221 | 222 | def mount(self, mountpoint, extra_options, foreground=False): 223 | options = ['fsname=atticfs', 'ro'] 224 | if extra_options: 225 | options.extend(extra_options.split(',')) 226 | llfuse.init(self, mountpoint, options) 227 | if not foreground: 228 | daemonize() 229 | try: 230 | llfuse.main(single=True) 231 | except: 232 | llfuse.close() 233 | raise 234 | llfuse.close() 235 | -------------------------------------------------------------------------------- /attic/hashindex.pyx: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | 4 | API_VERSION = 2 5 | 6 | 7 | cdef extern from "_hashindex.c": 8 | ctypedef struct HashIndex: 9 | pass 10 | 11 | 
HashIndex *hashindex_read(char *path) 12 | HashIndex *hashindex_init(int capacity, int key_size, int value_size) 13 | void hashindex_free(HashIndex *index) 14 | void hashindex_summarize(HashIndex *index, long long *total_size, long long *total_csize, long long *unique_size, long long *unique_csize) 15 | int hashindex_get_size(HashIndex *index) 16 | int hashindex_write(HashIndex *index, char *path) 17 | void *hashindex_get(HashIndex *index, void *key) 18 | void *hashindex_next_key(HashIndex *index, void *key) 19 | int hashindex_delete(HashIndex *index, void *key) 20 | int hashindex_set(HashIndex *index, void *key, void *value) 21 | int _htole32(int v) 22 | int _le32toh(int v) 23 | 24 | 25 | _NoDefault = object() 26 | 27 | cdef class IndexBase: 28 | cdef HashIndex *index 29 | key_size = 32 30 | 31 | def __cinit__(self, capacity=0, path=None): 32 | if path: 33 | self.index = hashindex_read(os.fsencode(path)) 34 | if not self.index: 35 | raise Exception('hashindex_read failed') 36 | else: 37 | self.index = hashindex_init(capacity, self.key_size, self.value_size) 38 | if not self.index: 39 | raise Exception('hashindex_init failed') 40 | 41 | def __dealloc__(self): 42 | if self.index: 43 | hashindex_free(self.index) 44 | 45 | @classmethod 46 | def read(cls, path): 47 | return cls(path=path) 48 | 49 | def write(self, path): 50 | if not hashindex_write(self.index, os.fsencode(path)): 51 | raise Exception('hashindex_write failed') 52 | 53 | def clear(self): 54 | hashindex_free(self.index) 55 | self.index = hashindex_init(0, self.key_size, self.value_size) 56 | if not self.index: 57 | raise Exception('hashindex_init failed') 58 | 59 | def setdefault(self, key, value): 60 | if not key in self: 61 | self[key] = value 62 | 63 | def __delitem__(self, key): 64 | assert len(key) == 32 65 | if not hashindex_delete(self.index, key): 66 | raise Exception('hashindex_delete failed') 67 | 68 | def get(self, key, default=None): 69 | try: 70 | return self[key] 71 | except KeyError: 72 | 
return default 73 | 74 | def pop(self, key, default=_NoDefault): 75 | try: 76 | value = self[key] 77 | del self[key] 78 | return value 79 | except KeyError: 80 | if default != _NoDefault: 81 | return default 82 | raise 83 | 84 | def __len__(self): 85 | return hashindex_get_size(self.index) 86 | 87 | 88 | cdef class NSIndex(IndexBase): 89 | 90 | value_size = 8 91 | 92 | def __getitem__(self, key): 93 | assert len(key) == 32 94 | data = hashindex_get(self.index, key) 95 | if not data: 96 | raise KeyError 97 | return _le32toh(data[0]), _le32toh(data[1]) 98 | 99 | def __setitem__(self, key, value): 100 | assert len(key) == 32 101 | cdef int[2] data 102 | data[0] = _htole32(value[0]) 103 | data[1] = _htole32(value[1]) 104 | if not hashindex_set(self.index, key, data): 105 | raise Exception('hashindex_set failed') 106 | 107 | def __contains__(self, key): 108 | assert len(key) == 32 109 | data = hashindex_get(self.index, key) 110 | return data != NULL 111 | 112 | def iteritems(self, marker=None): 113 | cdef const void *key 114 | iter = NSKeyIterator() 115 | iter.idx = self 116 | iter.index = self.index 117 | if marker: 118 | key = hashindex_get(self.index, marker) 119 | if marker is None: 120 | raise IndexError 121 | iter.key = key - 32 122 | return iter 123 | 124 | 125 | cdef class NSKeyIterator: 126 | cdef NSIndex idx 127 | cdef HashIndex *index 128 | cdef const void *key 129 | 130 | def __cinit__(self): 131 | self.key = NULL 132 | 133 | def __iter__(self): 134 | return self 135 | 136 | def __next__(self): 137 | self.key = hashindex_next_key(self.index, self.key) 138 | if not self.key: 139 | raise StopIteration 140 | cdef int *value = (self.key + 32) 141 | return (self.key)[:32], (_le32toh(value[0]), _le32toh(value[1])) 142 | 143 | 144 | cdef class ChunkIndex(IndexBase): 145 | 146 | value_size = 12 147 | 148 | def __getitem__(self, key): 149 | assert len(key) == 32 150 | data = hashindex_get(self.index, key) 151 | if not data: 152 | raise KeyError 153 | return 
_le32toh(data[0]), _le32toh(data[1]), _le32toh(data[2]) 154 | 155 | def __setitem__(self, key, value): 156 | assert len(key) == 32 157 | cdef int[3] data 158 | data[0] = _htole32(value[0]) 159 | data[1] = _htole32(value[1]) 160 | data[2] = _htole32(value[2]) 161 | if not hashindex_set(self.index, key, data): 162 | raise Exception('hashindex_set failed') 163 | 164 | def __contains__(self, key): 165 | assert len(key) == 32 166 | data = hashindex_get(self.index, key) 167 | return data != NULL 168 | 169 | def iteritems(self, marker=None): 170 | cdef const void *key 171 | iter = ChunkKeyIterator() 172 | iter.idx = self 173 | iter.index = self.index 174 | if marker: 175 | key = hashindex_get(self.index, marker) 176 | if marker is None: 177 | raise IndexError 178 | iter.key = key - 32 179 | return iter 180 | 181 | def summarize(self): 182 | cdef long long total_size, total_csize, unique_size, unique_csize 183 | hashindex_summarize(self.index, &total_size, &total_csize, &unique_size, &unique_csize) 184 | return total_size, total_csize, unique_size, unique_csize 185 | 186 | 187 | cdef class ChunkKeyIterator: 188 | cdef ChunkIndex idx 189 | cdef HashIndex *index 190 | cdef const void *key 191 | 192 | def __cinit__(self): 193 | self.key = NULL 194 | 195 | def __iter__(self): 196 | return self 197 | 198 | def __next__(self): 199 | self.key = hashindex_next_key(self.index, self.key) 200 | if not self.key: 201 | raise StopIteration 202 | cdef int *value = (self.key + 32) 203 | return (self.key)[:32], (_le32toh(value[0]), _le32toh(value[1]), _le32toh(value[2])) 204 | -------------------------------------------------------------------------------- /attic/key.py: -------------------------------------------------------------------------------- 1 | from binascii import hexlify, a2b_base64, b2a_base64 2 | from getpass import getpass 3 | import os 4 | import msgpack 5 | import textwrap 6 | import hmac 7 | from hashlib import sha256 8 | import zlib 9 | 10 | from attic.crypto import 
pbkdf2_sha256, get_random_bytes, AES, bytes_to_long, long_to_bytes, bytes_to_int, num_aes_blocks
from attic.helpers import IntegrityError, get_keys_dir, Error

# All-zero high 8 bytes of the 16-byte AES-CTR nonce (see AESKeyBase docs).
PREFIX = b'\0' * 8


class UnsupportedPayloadError(Error):
    """Unsupported payload type {}. A newer version is required to access this repository.
    """

class KeyfileNotFoundError(Error):
    """No key file for repository {} found in {}.
    """


class HMAC(hmac.HMAC):
    """Workaround a bug in Python < 3.4 Where HMAC does not accept memoryviews
    """
    def update(self, msg):
        self.inner.update(msg)


def key_creator(repository, args):
    """Create a new key of the type selected by ``args.encryption``."""
    if args.encryption == 'keyfile':
        return KeyfileKey.create(repository, args)
    elif args.encryption == 'passphrase':
        return PassphraseKey.create(repository, args)
    else:
        return PlaintextKey.create(repository, args)


def key_factory(repository, manifest_data):
    """Instantiate the right key class from the manifest's type byte."""
    if manifest_data[0] == KeyfileKey.TYPE:
        return KeyfileKey.detect(repository, manifest_data)
    elif manifest_data[0] == PassphraseKey.TYPE:
        return PassphraseKey.detect(repository, manifest_data)
    elif manifest_data[0] == PlaintextKey.TYPE:
        return PlaintextKey.detect(repository, manifest_data)
    else:
        raise UnsupportedPayloadError(manifest_data[0])


class KeyBase(object):
    """Abstract interface: id hashing plus encrypt/decrypt of chunks."""

    def __init__(self):
        # One-byte envelope tag prepended to every encrypted payload.
        self.TYPE_STR = bytes([self.TYPE])

    def id_hash(self, data):
        """Return HMAC hash using the "id" HMAC key
        """

    def encrypt(self, data):
        pass

    def decrypt(self, id, data):
        pass


class PlaintextKey(KeyBase):
    # Envelope type byte for unencrypted (but still compressed) chunks.
    TYPE = 0x02

    chunk_seed = 0

    @classmethod
    def create(cls, repository, args):
        print('Encryption NOT enabled.\nUse the "--encryption=passphrase|keyfile" to enable encryption.')
        return cls()

    @classmethod
    def detect(cls, repository, manifest_data):
        return cls()

    def id_hash(self, data):
        # Without a secret key the chunk id is a plain SHA-256.
        return sha256(data).digest()

    def encrypt(self, data):
        return b''.join([self.TYPE_STR, zlib.compress(data)])

    def decrypt(self, id, data):
        if data[0] != self.TYPE:
            raise IntegrityError('Invalid encryption envelope')
        data = zlib.decompress(memoryview(data)[1:])
        if id and sha256(data).digest() != id:
            raise IntegrityError('Chunk id verification failed')
        return data


class AESKeyBase(KeyBase):
    """Common base class shared by KeyfileKey and PassphraseKey

    Chunks are encrypted using 256bit AES in Counter Mode (CTR)

    Payload layout: TYPE(1) + HMAC(32) + NONCE(8) + CIPHERTEXT

    To reduce payload size only 8 bytes of the 16 bytes nonce is saved
    in the payload, the first 8 bytes are always zeros. This does not
    affect security but limits the maximum repository capacity to
    only 295 exabytes!
    """

    PAYLOAD_OVERHEAD = 1 + 32 + 8  # TYPE + HMAC + NONCE

    def id_hash(self, data):
        """Return HMAC hash using the "id" HMAC key
        """
        return HMAC(self.id_key, data, sha256).digest()

    def encrypt(self, data):
        # compress, then encrypt-then-MAC (HMAC covers nonce + ciphertext)
        data = zlib.compress(data)
        self.enc_cipher.reset()
        data = b''.join((self.enc_cipher.iv[8:], self.enc_cipher.encrypt(data)))
        hmac = HMAC(self.enc_hmac_key, data, sha256).digest()
        return b''.join((self.TYPE_STR, hmac, data))

    def decrypt(self, id, data):
        """Verify the HMAC, decrypt, decompress, and check the chunk id."""
        if data[0] != self.TYPE:
            raise IntegrityError('Invalid encryption envelope')
        hmac = memoryview(data)[1:33]
        # Verify HMAC before decrypting (encrypt-then-MAC order).
        if memoryview(HMAC(self.enc_hmac_key, memoryview(data)[33:], sha256).digest()) != hmac:
            raise IntegrityError('Encryption envelope checksum mismatch')
        self.dec_cipher.reset(iv=PREFIX + data[33:41])
        data = zlib.decompress(self.dec_cipher.decrypt(data[41:]))  # should use memoryview
        if id and HMAC(self.id_key, data, sha256).digest() != id:
            raise IntegrityError('Chunk id verification failed')
        return data

    def extract_nonce(self, payload):
        """Return the 8-byte nonce of *payload* as an integer counter."""
        if payload[0] != self.TYPE:
            raise IntegrityError('Invalid encryption envelope')
        nonce = bytes_to_long(payload[33:41])
        return nonce

    def init_from_random_data(self, data):
        """Split 100 bytes of key material into the three keys + chunk seed."""
        self.enc_key = data[0:32]
        self.enc_hmac_key = data[32:64]
        self.id_key = data[64:96]
        self.chunk_seed = bytes_to_int(data[96:100])
        # Convert to signed int32
        if self.chunk_seed & 0x80000000:
            self.chunk_seed = self.chunk_seed - 0xffffffff - 1

    def init_ciphers(self, enc_iv=b''):
        self.enc_cipher = AES(self.enc_key, enc_iv)
        self.dec_cipher = AES(self.enc_key)


class PassphraseKey(AESKeyBase):
    # Envelope type byte for passphrase-derived keys.
    TYPE = 0x01
    # PBKDF2 iteration count used to derive key material from the passphrase.
    iterations = 100000

    @classmethod
    def create(cls, repository, args):
        """Interactively (or via ATTIC_PASSPHRASE) set up a passphrase key."""
        key = cls()
        passphrase = os.environ.get('ATTIC_PASSPHRASE')
        if passphrase is not None:
            passphrase2 = passphrase
        else:
            # Sentinels 1 != 2 force at least one prompt iteration.
            passphrase, passphrase2 = 1, 2
        while passphrase != passphrase2:
            passphrase = getpass('Enter passphrase: ')
            if not passphrase:
                print('Passphrase must not be blank')
                continue
            passphrase2 = getpass('Enter same passphrase again: ')
            if passphrase != passphrase2:
                print('Passphrases do not match')
        key.init(repository, passphrase)
        if passphrase:
            print('Remember your passphrase. 
Your data will be inaccessible without it.')
        return key

    @classmethod
    def detect(cls, repository, manifest_data):
        """Prompt for the passphrase until the manifest decrypts cleanly."""
        prompt = 'Enter passphrase for %s: ' % repository._location.orig
        key = cls()
        passphrase = os.environ.get('ATTIC_PASSPHRASE')
        if passphrase is None:
            passphrase = getpass(prompt)
        while True:
            key.init(repository, passphrase)
            try:
                key.decrypt(None, manifest_data)
                # Resume the CTR counter past the blocks used by the manifest.
                num_blocks = num_aes_blocks(len(manifest_data) - 41)
                key.init_ciphers(PREFIX + long_to_bytes(key.extract_nonce(manifest_data) + num_blocks))
                return key
            except IntegrityError:
                passphrase = getpass(prompt)

    def init(self, repository, passphrase):
        # Key material is bound to this repository via its id as PBKDF2 salt.
        self.init_from_random_data(pbkdf2_sha256(passphrase.encode('utf-8'), repository.id, self.iterations, 100))
        self.init_ciphers()


class KeyfileKey(AESKeyBase):
    # First token of the key file header line.
    FILE_ID = 'ATTIC KEY'
    # Envelope type byte for key-file based keys.
    TYPE = 0x00

    @classmethod
    def detect(cls, repository, manifest_data):
        """Locate and unlock the key file for *repository*."""
        key = cls()
        path = cls.find_key_file(repository)
        prompt = 'Enter passphrase for key file %s: ' % path
        passphrase = os.environ.get('ATTIC_PASSPHRASE', '')
        while not key.load(path, passphrase):
            passphrase = getpass(prompt)
        # Resume the CTR counter past the blocks used by the manifest.
        num_blocks = num_aes_blocks(len(manifest_data) - 41)
        key.init_ciphers(PREFIX + long_to_bytes(key.extract_nonce(manifest_data) + num_blocks))
        return key

    @classmethod
    def find_key_file(cls, repository):
        """Scan the keys dir for the file whose header matches the repo id."""
        id = hexlify(repository.id).decode('ascii')
        keys_dir = get_keys_dir()
        for name in os.listdir(keys_dir):
            filename = os.path.join(keys_dir, name)
            with open(filename, 'r') as fd:
                line = fd.readline().strip()
                # line[10:] skips 'ATTIC KEY ' (FILE_ID plus one space).
                if line and line.startswith(cls.FILE_ID) and line[10:] == id:
                    return filename
        raise KeyfileNotFoundError(repository._location.canonical_path(), get_keys_dir())

    def load(self, filename, passphrase):
        """Try to decrypt the key file; return True on success, falsy otherwise."""
        with open(filename, 'r') as fd:
            cdata = a2b_base64(''.join(fd.readlines()[1:]).encode('ascii'))  # .encode needed for Python 3.[0-2]
        data = self.decrypt_key_file(cdata, passphrase)
        if data:
            key = msgpack.unpackb(data)
            if key[b'version'] != 1:
                raise IntegrityError('Invalid key file header')
            self.repository_id = key[b'repository_id']
            self.enc_key = key[b'enc_key']
            self.enc_hmac_key = key[b'enc_hmac_key']
            self.id_key = key[b'id_key']
            self.chunk_seed = key[b'chunk_seed']
            self.path = filename
            return True

    def decrypt_key_file(self, data, passphrase):
        """Return the inner key material, or None if the passphrase is wrong."""
        d = msgpack.unpackb(data)
        assert d[b'version'] == 1
        assert d[b'algorithm'] == b'sha256'
        key = pbkdf2_sha256(passphrase.encode('utf-8'), d[b'salt'], d[b'iterations'], 32)
        data = AES(key).decrypt(d[b'data'])
        # Wrong passphrase shows up as an HMAC mismatch here.
        if HMAC(key, data, sha256).digest() != d[b'hash']:
            return None
        return data

    def encrypt_key_file(self, data, passphrase):
        """Wrap *data* with PBKDF2-derived AES + HMAC, msgpack-framed."""
        salt = get_random_bytes(32)
        iterations = 100000
        key = pbkdf2_sha256(passphrase.encode('utf-8'), salt, iterations, 32)
        hash = HMAC(key, data, sha256).digest()
        cdata = AES(key).encrypt(data)
        d = {
            'version': 1,
            'salt': salt,
            'iterations': iterations,
            'algorithm': 'sha256',
            'hash': hash,
            'data': cdata,
        }
        return msgpack.packb(d)

    def save(self, path, passphrase):
        """Write the key file: header line + base64-wrapped encrypted body."""
        key = {
            'version': 1,
            'repository_id': self.repository_id,
            'enc_key': self.enc_key,
            'enc_hmac_key': self.enc_hmac_key,
            'id_key': self.id_key,
            'chunk_seed': self.chunk_seed,
        }
        data = self.encrypt_key_file(msgpack.packb(key), passphrase)
        with open(path, 'w') as fd:
            fd.write('%s %s\n' % (self.FILE_ID, hexlify(self.repository_id).decode('ascii')))
            fd.write('\n'.join(textwrap.wrap(b2a_base64(data).decode('ascii'))))
            fd.write('\n')
        self.path = path

    def change_passphrase(self):
        """Prompt for a new passphrase and rewrite the key file in place."""
        passphrase, passphrase2 = 1, 2
        while passphrase != passphrase2:
            passphrase = getpass('New passphrase: ')
            passphrase2 = getpass('Enter same passphrase again: ')
            if passphrase != passphrase2:
                print('Passphrases do not match')
        self.save(self.path, passphrase)
        print('Key file "%s" updated' % self.path)

    @classmethod
    def create(cls, repository, args):
        """Generate fresh key material and store it in a new key file."""
        filename = args.repository.to_key_filename()
        path = filename
        i = 1
        # Avoid clobbering an existing key file: append .2, .3, ...
        while os.path.exists(path):
            i += 1
            path = filename + '.%d' % i
        passphrase = os.environ.get('ATTIC_PASSPHRASE')
        if passphrase is not None:
            passphrase2 = passphrase
        else:
            # Sentinels 1 != 2 force at least one prompt iteration.
            passphrase, passphrase2 = 1, 2
        while passphrase != passphrase2:
            passphrase = getpass('Enter passphrase (empty for no passphrase):')
            passphrase2 = getpass('Enter same passphrase again: ')
            if passphrase != passphrase2:
                print('Passphrases do not match')
        key = cls()
        key.repository_id = repository.id
        key.init_from_random_data(get_random_bytes(100))
        key.init_ciphers()
        key.save(path, passphrase)
        print('Key file "%s" created.' % key.path)
        print('Keep this file safe. 
Your data will be inaccessible without it.')
        return key

# ---- attic/lrucache.py ----
class LRUCache(dict):
    """A dict with a fixed capacity evicting the least recently used key.

    Recency is tracked in the ``_lru`` list: index 0 is the coldest key,
    the last element the most recently touched one.
    """

    def __init__(self, capacity):
        super(LRUCache, self).__init__()
        self._lru = []
        self._capacity = capacity

    def __setitem__(self, key, value):
        # Move key to the hot end, then evict from the cold end if over
        # capacity.
        try:
            self._lru.remove(key)
        except ValueError:
            pass
        self._lru.append(key)
        while len(self._lru) > self._capacity:
            del self[self._lru[0]]
        return super(LRUCache, self).__setitem__(key, value)

    def __getitem__(self, key):
        # A successful lookup refreshes the key's recency.
        try:
            self._lru.remove(key)
            self._lru.append(key)
        except ValueError:
            pass
        return super(LRUCache, self).__getitem__(key)

    def __delitem__(self, key):
        try:
            self._lru.remove(key)
        except ValueError:
            pass
        return super(LRUCache, self).__delitem__(key)

    def pop(self, key, default=None):
        try:
            self._lru.remove(key)
        except ValueError:
            pass
        return super(LRUCache, self).pop(key, default)

    def _not_implemented(self, *args, **kw):
        raise NotImplementedError
    # These dict operations would bypass LRU bookkeeping, so forbid them.
    popitem = setdefault = update = _not_implemented

# ---- attic/platform.py ----
import sys

# Select the platform-specific ACL implementation; fall back to no-ops on
# unsupported platforms.
if sys.platform.startswith('linux'):
    from attic.platform_linux import acl_get, acl_set, API_VERSION
elif sys.platform.startswith('freebsd'):
    from attic.platform_freebsd import acl_get, acl_set, API_VERSION
elif sys.platform == 'darwin':
    from attic.platform_darwin import acl_get, acl_set, API_VERSION
else:
    API_VERSION = 2

    def acl_get(path, item, st, numeric_owner=False):
        pass
    def acl_set(path, item, 
numeric_owner=False):
        pass

# ---- attic/platform_darwin.pyx ----
import os
from attic.helpers import user2uid, group2gid

API_VERSION = 2

cdef extern from "sys/acl.h":
    ctypedef struct _acl_t:
        pass
    ctypedef _acl_t *acl_t

    int acl_free(void *obj)
    acl_t acl_get_link_np(const char *path, int type)
    acl_t acl_set_link_np(const char *path, int type, acl_t acl)
    acl_t acl_from_text(const char *buf)
    char *acl_to_text(acl_t acl, ssize_t *len_p)
    int ACL_TYPE_EXTENDED


def _remove_numeric_id_if_possible(acl):
    """Replace the user/group field with the local uid/gid if possible
    """
    entries = []
    for entry in acl.decode('ascii').split('\n'):
        if entry:
            fields = entry.split(':')
            if fields[0] == 'user':
                if user2uid(fields[2]) is not None:
                    fields[1] = fields[3] = ''
            elif fields[0] == 'group':
                if group2gid(fields[2]) is not None:
                    fields[1] = fields[3] = ''
            entries.append(':'.join(fields))
    return ('\n'.join(entries)).encode('ascii')


def _remove_non_numeric_identifier(acl):
    """Remove user and group names from the acl
    """
    entries = []
    for entry in acl.split(b'\n'):
        if entry:
            fields = entry.split(b':')
            if fields[0] in (b'user', b'group'):
                fields[2] = b''
            entries.append(b':'.join(fields))
        else:
            entries.append(entry)
    return b'\n'.join(entries)


def acl_get(path, item, st, numeric_owner=False):
    """Save the extended ACL of *path* into item[b'acl_extended'].

    If *numeric_owner* is true, user/group names are stripped so only
    numeric ids remain.
    """
    cdef acl_t acl = NULL
    cdef char *text = NULL
    try:
        acl = acl_get_link_np(os.fsencode(path), ACL_TYPE_EXTENDED)
        if acl == NULL:
            return
        text = acl_to_text(acl, NULL)
        if text == NULL:
            return
        if numeric_owner:
            item[b'acl_extended'] = _remove_non_numeric_identifier(text)
        else:
            item[b'acl_extended'] = text
    finally:
        acl_free(text)
        acl_free(acl)


def acl_set(path, item, numeric_owner=False):
    """Restore the extended ACL stored in item[b'acl_extended'], if any."""
    cdef acl_t acl = NULL
    try:
        try:
            if numeric_owner:
                acl = acl_from_text(item[b'acl_extended'])
            else:
                acl = acl_from_text(_remove_numeric_id_if_possible(item[b'acl_extended']))
        except KeyError:
            # item carries no extended ACL — nothing to restore
            return
        if acl == NULL:
            return
        if acl_set_link_np(os.fsencode(path), ACL_TYPE_EXTENDED, acl):
            return
    finally:
        acl_free(acl)


# ---- attic/platform_freebsd.pyx ----
import os
from attic.helpers import posix_acl_use_stored_uid_gid

API_VERSION = 2

cdef extern from "errno.h":
    int errno
    int EINVAL

cdef extern from "sys/types.h":
    int ACL_TYPE_ACCESS
    int ACL_TYPE_DEFAULT
    int ACL_TYPE_NFS4

cdef extern from "sys/acl.h":
    ctypedef struct _acl_t:
        pass
    ctypedef _acl_t *acl_t

    int acl_free(void *obj)
    acl_t acl_get_link_np(const char *path, int type)
    acl_t acl_set_link_np(const char *path, int type, acl_t acl)
    acl_t acl_from_text(const char *buf)
    char *acl_to_text_np(acl_t acl, ssize_t *len, int flags)
    int ACL_TEXT_NUMERIC_IDS
    int ACL_TEXT_APPEND_ID

cdef extern from "unistd.h":
    long lpathconf(const char *path, int name)
    int _PC_ACL_NFS4


cdef _get_acl(p, type, item, attribute, int flags):
    # Fetch one ACL of *type* for path *p* and store its text form.
    cdef acl_t acl
    cdef char *text
    acl = acl_get_link_np(p, type)
    if acl:
        text = acl_to_text_np(acl, NULL, flags)
        if text:
            item[attribute] = text
            acl_free(text)
        acl_free(acl)


def acl_get(path, item, st, numeric_owner=False):
    """Saves ACL Entries

    If `numeric_owner` is True the user/group field is not preserved only uid/gid
    """
    cdef int flags = ACL_TEXT_APPEND_ID
    p = os.fsencode(path)
    ret = lpathconf(p, _PC_ACL_NFS4)
    if ret < 0 and errno == EINVAL:
        return
    flags |= ACL_TEXT_NUMERIC_IDS if numeric_owner else 0
    if ret > 0:
        # filesystem uses NFSv4 ACLs
        _get_acl(p, ACL_TYPE_NFS4, item, b'acl_nfs4', flags)
    else:
        _get_acl(p, ACL_TYPE_ACCESS, item, b'acl_access', flags)
        _get_acl(p, ACL_TYPE_DEFAULT, item, b'acl_default', flags)


cdef _set_acl(p, type, item, attribute, numeric_owner=False):
    # Restore one stored ACL text, converting names to stored ids if asked.
    cdef acl_t acl
    text = item.get(attribute)
    if text:
        if numeric_owner and type == ACL_TYPE_NFS4:
            text = _nfs4_use_stored_uid_gid(text)
        elif numeric_owner and type in(ACL_TYPE_ACCESS, ACL_TYPE_DEFAULT):
            text = posix_acl_use_stored_uid_gid(text)
        acl = acl_from_text(text)
        if acl:
            acl_set_link_np(p, type, acl)
            acl_free(acl)


cdef _nfs4_use_stored_uid_gid(acl):
    """Replace the user/group field with the stored uid/gid
    """
    entries = []
    for entry in acl.decode('ascii').split('\n'):
        if entry:
            if entry.startswith('user:') or entry.startswith('group:'):
                fields = entry.split(':')
                # FIX: str.join takes a single iterable — the previous
                # ':'.join(fields[0], fields[5], *fields[2:-1]) raised
                # TypeError for every user:/group: entry.
                entries.append(':'.join([fields[0], fields[5]] + fields[2:-1]))
            else:
                entries.append(entry)
    return ('\n'.join(entries)).encode('ascii')


def acl_set(path, item, numeric_owner=False):
    """Restore ACL Entries

    If `numeric_owner` is True the stored uid/gid is used instead
    of the user/group names
    """
    p = os.fsencode(path)
    _set_acl(p, ACL_TYPE_NFS4, item, b'acl_nfs4', numeric_owner)
    _set_acl(p, ACL_TYPE_ACCESS, item, b'acl_access', numeric_owner)
    _set_acl(p, ACL_TYPE_DEFAULT, item, b'acl_default', numeric_owner)

# ---- attic/platform_linux.pyx ----
import os
import re
from stat import S_ISLNK
from attic.helpers import posix_acl_use_stored_uid_gid, user2uid, group2gid

API_VERSION = 2

cdef extern from "sys/types.h":
    int ACL_TYPE_ACCESS
    int ACL_TYPE_DEFAULT

cdef extern from "sys/acl.h":
    ctypedef struct _acl_t:
        pass
    ctypedef _acl_t *acl_t

    int acl_free(void *obj)
    acl_t acl_get_file(const char *path, int type)
    acl_t acl_set_file(const char *path, int type, acl_t acl)
    acl_t acl_from_text(const char *buf)
    char *acl_to_text(acl_t acl, ssize_t *len)

cdef extern from "acl/libacl.h":
    int acl_extended_file(const char *path)


# Strips trailing "# ..." comments emitted by acl_to_text.
_comment_re = re.compile(' *#.*', re.M)


def acl_use_local_uid_gid(acl):
    """Replace the user/group field with the local uid/gid if possible
    """
    entries = []
    for entry in acl.decode('ascii').split('\n'):
        if entry:
            fields = entry.split(':')
            # FIX: the substitution was previously thrown away — the code
            # assigned into `fields` but then joined a fresh
            # entry.split(':')[:3]; also join the uid/gid as str, not int.
            if fields[0] == 'user' and fields[1]:
                fields[1] = str(user2uid(fields[1], fields[3]))
            elif fields[0] == 'group' and fields[1]:
                fields[1] = str(group2gid(fields[1], fields[3]))
            entries.append(':'.join(fields[:3]))
    return ('\n'.join(entries)).encode('ascii')


cdef acl_append_numeric_ids(acl):
    """Extend the "POSIX 1003.1e draft standard 17" format with an additional uid/gid field
    """
    entries = []
    for entry in _comment_re.sub('', acl.decode('ascii')).split('\n'):
        if entry:
            type, name, permission = entry.split(':')
            if name and type == 'user':
                entries.append(':'.join([type, name, permission, str(user2uid(name, name))]))
            elif name and type == 'group':
                entries.append(':'.join([type, name, permission, str(group2gid(name, name))]))
            else:
                entries.append(entry)
    return ('\n'.join(entries)).encode('ascii')


cdef acl_numeric_ids(acl):
    """Replace the "POSIX 1003.1e draft standard 17" user/group field with uid/gid
    """
    entries = []
    for entry in _comment_re.sub('', acl.decode('ascii')).split('\n'):
        if entry:
            type, name, permission = entry.split(':')
            if name and type == 'user':
                uid = str(user2uid(name, name))
                entries.append(':'.join([type, uid, permission, uid]))
            elif name and type == 'group':
                gid = str(group2gid(name, name))
                entries.append(':'.join([type, gid, permission, gid]))
            else:
                entries.append(entry)
    return ('\n'.join(entries)).encode('ascii')


def acl_get(path, item, st, numeric_owner=False):
    """Saves ACL Entries

    If `numeric_owner` is True the user/group field is not preserved only uid/gid
    """
    cdef acl_t default_acl = NULL
    cdef acl_t access_acl = NULL
    cdef char *default_text = NULL
    cdef char *access_text = NULL

    p = os.fsencode(path)
    # Skip symlinks and files without extended ACLs.
    if S_ISLNK(st.st_mode) or acl_extended_file(p) <= 0:
        return
    if numeric_owner:
        converter = acl_numeric_ids
    else:
        converter = acl_append_numeric_ids
    try:
        access_acl = acl_get_file(p, ACL_TYPE_ACCESS)
        if access_acl:
            access_text = acl_to_text(access_acl, NULL)
            if access_text:
                item[b'acl_access'] = converter(access_text)
        default_acl = acl_get_file(p, ACL_TYPE_DEFAULT)
        if default_acl:
            default_text = acl_to_text(default_acl, NULL)
            if default_text:
                item[b'acl_default'] = converter(default_text)
    finally:
        acl_free(default_text)
        acl_free(default_acl)
        acl_free(access_text)
        acl_free(access_acl)


def acl_set(path, item, numeric_owner=False):
    """Restore ACL Entries

    If `numeric_owner` is True the stored uid/gid is used instead
    of the user/group names
    """
    cdef acl_t access_acl = NULL
    cdef acl_t default_acl = NULL

    p = os.fsencode(path)
    if numeric_owner:
        converter = posix_acl_use_stored_uid_gid
    else:
        converter = acl_use_local_uid_gid
    access_text = item.get(b'acl_access')
    default_text = item.get(b'acl_default')
    if access_text:
        try:
            access_acl = acl_from_text(converter(access_text))
            if access_acl:
                acl_set_file(p, ACL_TYPE_ACCESS, access_acl)
        finally:
            acl_free(access_acl)
    if default_text:
        try:
            default_acl = acl_from_text(converter(default_text))
            if default_acl:
                acl_set_file(p, ACL_TYPE_DEFAULT, default_acl)
        finally:
            acl_free(default_acl)

# ---- attic/remote.py ----
import errno
import fcntl
import msgpack
import os
import select
import shutil
from subprocess import Popen, PIPE
import sys
import tempfile

from .hashindex import NSIndex
from .helpers import Error, IntegrityError
from .repository import Repository

BUFSIZE = 10 * 1024 * 1024


class ConnectionClosed(Error):
    """Connection closed by remote host"""


class PathNotAllowed(Error):
    """Repository path not allowed"""


class RepositoryServer(object):
    """Serves repository RPC requests over stdin/stdout (attic serve)."""

    def __init__(self, restrict_to_paths):
        self.repository = None
        self.restrict_to_paths = restrict_to_paths

    def serve(self):
        """Main loop: read msgpack requests, dispatch, write responses."""
        stdin_fd = sys.stdin.fileno()
        stdout_fd = sys.stdout.fileno()
        # Make stdin non-blocking
        fl = fcntl.fcntl(stdin_fd, fcntl.F_GETFL)
        fcntl.fcntl(stdin_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        # Make stdout blocking
        fl = fcntl.fcntl(stdout_fd, fcntl.F_GETFL)
        fcntl.fcntl(stdout_fd, fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
        unpacker = msgpack.Unpacker(use_list=False)
        while True:
            r, w, es = 
select.select([stdin_fd], [], [], 10) 44 | if r: 45 | data = os.read(stdin_fd, BUFSIZE) 46 | if not data: 47 | return 48 | unpacker.feed(data) 49 | for type, msgid, method, args in unpacker: 50 | method = method.decode('ascii') 51 | try: 52 | try: 53 | f = getattr(self, method) 54 | except AttributeError: 55 | f = getattr(self.repository, method) 56 | res = f(*args) 57 | except Exception as e: 58 | os.write(stdout_fd, msgpack.packb((1, msgid, e.__class__.__name__, e.args))) 59 | else: 60 | os.write(stdout_fd, msgpack.packb((1, msgid, None, res))) 61 | if es: 62 | return 63 | 64 | def negotiate(self, versions): 65 | return 1 66 | 67 | def open(self, path, create=False): 68 | path = os.fsdecode(path) 69 | if path.startswith('/~'): 70 | path = path[1:] 71 | path = os.path.realpath(os.path.expanduser(path)) 72 | if self.restrict_to_paths: 73 | for restrict_to_path in self.restrict_to_paths: 74 | if path.startswith(os.path.realpath(restrict_to_path)): 75 | break 76 | else: 77 | raise PathNotAllowed(path) 78 | self.repository = Repository(path, create) 79 | return self.repository.id 80 | 81 | 82 | class RemoteRepository(object): 83 | extra_test_args = [] 84 | 85 | class RPCError(Exception): 86 | 87 | def __init__(self, name): 88 | self.name = name 89 | 90 | def __init__(self, location, create=False): 91 | self.location = location 92 | self.preload_ids = [] 93 | self.msgid = 0 94 | self.to_send = b'' 95 | self.cache = {} 96 | self.ignore_responses = set() 97 | self.responses = {} 98 | self.unpacker = msgpack.Unpacker(use_list=False) 99 | self.p = None 100 | if location.host == '__testsuite__': 101 | args = [sys.executable, '-m', 'attic.archiver', 'serve'] + self.extra_test_args 102 | else: 103 | args = ['ssh'] 104 | if location.port: 105 | args += ['-p', str(location.port)] 106 | if location.user: 107 | args.append('%s@%s' % (location.user, location.host)) 108 | else: 109 | args.append('%s' % location.host) 110 | args += ['attic', 'serve'] 111 | self.p = Popen(args, 
    def __del__(self):
        # Best-effort shutdown of the server subprocess on GC.
        self.close()

    def call(self, cmd, *args, **kw):
        """Issue a single RPC call and return its (first) result."""
        for resp in self.call_many(cmd, [args], **kw):
            return resp

    def call_many(self, cmd, calls, wait=True, is_preloaded=False):
        """Issue the RPC *cmd* once per argument tuple in *calls*.

        Results are yielded in call order.  Up to 100 requests are kept
        in flight to pipeline the connection; 'get' requests may be
        answered from the preload cache populated via preload().
        Server-reported errors are mapped back to local exception types.
        """
        if not calls:
            return
        def fetch_from_cache(args):
            # Pop the oldest preloaded msgid recorded for these args.
            msgid = self.cache[args].pop(0)
            if not self.cache[args]:
                del self.cache[args]
            return msgid

        calls = list(calls)
        waiting_for = []
        w_fds = [self.stdin_fd]
        while wait or calls:
            # Drain responses that arrived for the oldest outstanding calls.
            while waiting_for:
                try:
                    error, res = self.responses.pop(waiting_for[0])
                    waiting_for.pop(0)
                    if error:
                        # Translate symbolic server-side errors back into
                        # the exception types callers expect locally.
                        if error == b'DoesNotExist':
                            raise Repository.DoesNotExist(self.location.orig)
                        elif error == b'AlreadyExists':
                            raise Repository.AlreadyExists(self.location.orig)
                        elif error == b'CheckNeeded':
                            raise Repository.CheckNeeded(self.location.orig)
                        elif error == b'IntegrityError':
                            raise IntegrityError(res)
                        elif error == b'PathNotAllowed':
                            raise PathNotAllowed(*res)
                        if error == b'ObjectNotFound':
                            raise Repository.ObjectNotFound(res[0], self.location.orig)
                        raise self.RPCError(error)
                    else:
                        yield res
                        if not waiting_for and not calls:
                            return
                except KeyError:
                    # Response for the oldest msgid not here yet; go poll I/O.
                    break
            r, w, x = select.select(self.r_fds, w_fds, self.x_fds, 1)
            if x:
                raise Exception('FD exception occured')
            if r:
                data = os.read(self.stdout_fd, BUFSIZE)
                if not data:
                    raise ConnectionClosed()
                self.unpacker.feed(data)
                for type, msgid, error, res in self.unpacker:
                    if msgid in self.ignore_responses:
                        # Reply to a call abandoned by an earlier generator.
                        self.ignore_responses.remove(msgid)
                    else:
                        self.responses[msgid] = error, res
            if w:
                # Queue up the next request (bounded pipeline of 100).
                while not self.to_send and (calls or self.preload_ids) and len(waiting_for) < 100:
                    if calls:
                        if is_preloaded:
                            if calls[0] in self.cache:
                                waiting_for.append(fetch_from_cache(calls.pop(0)))
                        else:
                            args = calls.pop(0)
                            if cmd == 'get' and args in self.cache:
                                # Result already requested by preload().
                                waiting_for.append(fetch_from_cache(args))
                            else:
                                self.msgid += 1
                                waiting_for.append(self.msgid)
                                self.to_send = msgpack.packb((1, self.msgid, cmd, args))
                    if not self.to_send and self.preload_ids:
                        # Opportunistically issue background prefetches.
                        args = (self.preload_ids.pop(0),)
                        self.msgid += 1
                        self.cache.setdefault(args, []).append(self.msgid)
                        self.to_send = msgpack.packb((1, self.msgid, cmd, args))

                if self.to_send:
                    try:
                        self.to_send = self.to_send[os.write(self.stdin_fd, self.to_send):]
                    except OSError as e:
                        # io.write might raise EAGAIN even though select indicates
                        # that the fd should be writable
                        if e.errno != errno.EAGAIN:
                            raise
                if not self.to_send and not (calls or self.preload_ids):
                    # Nothing left to write: stop select()ing for writability.
                    w_fds = []
        # Generator exits with requests still outstanding (wait=False or
        # abandoned early): their replies must be discarded when they arrive.
        self.ignore_responses |= set(waiting_for)

    def check(self, repair=False):
        return self.call('check', repair)

    def commit(self, *args):
        # Extra positional args are accepted and ignored — presumably to
        # match the local Repository API; TODO confirm against callers.
        return self.call('commit')

    def rollback(self, *args):
        return self.call('rollback')

    def __len__(self):
        return self.call('__len__')

    def list(self, limit=None, marker=None):
        return self.call('list', limit, marker)
get(self, id_): 229 | for resp in self.get_many([id_]): 230 | return resp 231 | 232 | def get_many(self, ids, is_preloaded=False): 233 | for resp in self.call_many('get', [(id_,) for id_ in ids], is_preloaded=is_preloaded): 234 | yield resp 235 | 236 | def put(self, id_, data, wait=True): 237 | return self.call('put', id_, data, wait=wait) 238 | 239 | def delete(self, id_, wait=True): 240 | return self.call('delete', id_, wait=wait) 241 | 242 | def close(self): 243 | if self.p: 244 | self.p.stdin.close() 245 | self.p.stdout.close() 246 | self.p.wait() 247 | self.p = None 248 | 249 | def preload(self, ids): 250 | self.preload_ids += ids 251 | 252 | 253 | class RepositoryCache: 254 | """A caching Repository wrapper 255 | 256 | Caches Repository GET operations using a temporary file 257 | """ 258 | def __init__(self, repository): 259 | self.tmppath = None 260 | self.index = None 261 | self.data_fd = None 262 | self.repository = repository 263 | self.entries = {} 264 | self.initialize() 265 | 266 | def __del__(self): 267 | self.cleanup() 268 | 269 | def initialize(self): 270 | self.tmppath = tempfile.mkdtemp() 271 | self.index = NSIndex() 272 | self.data_fd = open(os.path.join(self.tmppath, 'data'), 'a+b') 273 | 274 | def cleanup(self): 275 | del self.index 276 | if self.data_fd: 277 | self.data_fd.close() 278 | if self.tmppath: 279 | shutil.rmtree(self.tmppath) 280 | 281 | def load_object(self, offset, size): 282 | self.data_fd.seek(offset) 283 | data = self.data_fd.read(size) 284 | assert len(data) == size 285 | return data 286 | 287 | def store_object(self, key, data): 288 | self.data_fd.seek(0, os.SEEK_END) 289 | self.data_fd.write(data) 290 | offset = self.data_fd.tell() 291 | self.index[key] = offset - len(data), len(data) 292 | 293 | def get(self, key): 294 | return next(self.get_many([key])) 295 | 296 | def get_many(self, keys): 297 | unknown_keys = [key for key in keys if not key in self.index] 298 | repository_iterator = zip(unknown_keys, 
self.repository.get_many(unknown_keys)) 299 | for key in keys: 300 | try: 301 | yield self.load_object(*self.index[key]) 302 | except KeyError: 303 | for key_, data in repository_iterator: 304 | if key_ == key: 305 | self.store_object(key, data) 306 | yield data 307 | break 308 | # Consume any pending requests 309 | for _ in repository_iterator: 310 | pass 311 | 312 | 313 | def cache_if_remote(repository): 314 | if isinstance(repository, RemoteRepository): 315 | return RepositoryCache(repository) 316 | return repository 317 | -------------------------------------------------------------------------------- /attic/testsuite/__init__.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | import filecmp 3 | import os 4 | import posix 5 | import sys 6 | import sysconfig 7 | import time 8 | import unittest 9 | from attic.helpers import st_mtime_ns 10 | from attic.xattr import get_all 11 | 12 | try: 13 | import llfuse 14 | # Does this version of llfuse support ns precision? 
    have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
except ImportError:
    have_fuse_mtime_ns = False

# True when the OS supports BSD-style file flags on symlinks.
has_lchflags = hasattr(os, 'lchflags')


# The mtime get/set precison varies on different OS and Python versions
if 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []):
    st_mtime_ns_round = 0
elif 'HAVE_UTIMES' in sysconfig.get_config_vars():
    st_mtime_ns_round = -6
else:
    st_mtime_ns_round = -9


# Nanosecond st_mtime_ns is only available on Python 3.3+.
has_mtime_ns = sys.version >= '3.3'
utime_supports_fd = os.utime in getattr(os, 'supports_fd', {})


class AtticTestCase(unittest.TestCase):
    """Base class for all attic tests: snake_case assertion aliases plus
    filesystem comparison helpers used by the archiver/FUSE tests.
    """
    assert_in = unittest.TestCase.assertIn
    assert_not_in = unittest.TestCase.assertNotIn
    assert_equal = unittest.TestCase.assertEqual
    assert_not_equal = unittest.TestCase.assertNotEqual
    assert_raises = unittest.TestCase.assertRaises
    assert_true = unittest.TestCase.assertTrue

    @contextmanager
    def assert_creates_file(self, path):
        # Asserts *path* does not exist before the with-block and does after.
        self.assert_true(not os.path.exists(path), '{} should not exist'.format(path))
        yield
        self.assert_true(os.path.exists(path), '{} should exist'.format(path))

    def assert_dirs_equal(self, dir1, dir2):
        """Assert the two directory trees are recursively identical."""
        diff = filecmp.dircmp(dir1, dir2)
        self._assert_dirs_equal_cmp(diff)

    def _assert_dirs_equal_cmp(self, diff):
        # Recursive worker for assert_dirs_equal: no extra/missing/differing
        # files, then per-file metadata comparison.
        self.assert_equal(diff.left_only, [])
        self.assert_equal(diff.right_only, [])
        self.assert_equal(diff.diff_files, [])
        self.assert_equal(diff.funny_files, [])
        for filename in diff.common:
            path1 = os.path.join(diff.left, filename)
            path2 = os.path.join(diff.right, filename)
            s1 = os.lstat(path1)
            s2 = os.lstat(path2)
            # Assume path2 is on FUSE if st_dev is different
            fuse = s1.st_dev != s2.st_dev
            attrs = ['st_mode', 'st_uid', 'st_gid', 'st_rdev']
            if has_lchflags:
                attrs.append('st_flags')
            if not fuse or not os.path.isdir(path1):
                # dir nlink is always 1 on our fuse fileystem
                attrs.append('st_nlink')
            d1 = [filename] + [getattr(s1, a) for a in attrs]
            d2 = [filename] + [getattr(s2, a) for a in attrs]
            if not os.path.islink(path1) or utime_supports_fd:
                # Older versions of llfuse does not support ns precision properly
                if fuse and not have_fuse_mtime_ns:
                    d1.append(round(st_mtime_ns(s1), -4))
                    d2.append(round(st_mtime_ns(s2), -4))
                d1.append(round(st_mtime_ns(s1), st_mtime_ns_round))
                d2.append(round(st_mtime_ns(s2), st_mtime_ns_round))
            # Compare extended attributes as well.
            d1.append(get_all(path1, follow_symlinks=False))
            d2.append(get_all(path2, follow_symlinks=False))
            self.assert_equal(d1, d2)
        for sub_diff in diff.subdirs.values():
            self._assert_dirs_equal_cmp(sub_diff)

    def wait_for_mount(self, path, timeout=5):
        """Wait until a filesystem is mounted on `path`
        """
        timeout += time.time()
        while timeout > time.time():
            if os.path.ismount(path):
                return
            time.sleep(.1)
        raise Exception('wait_for_mount(%s) timeout' % path)


def get_tests(suite):
    """Generates a sequence of tests from a test suite
    """
    for item in suite:
        try:
            # TODO: This could be "yield from..." with Python 3.3+
            for i in get_tests(item):
                yield i
        except TypeError:
            # *item* is not iterable, so it is an actual test case.
            yield item
with Python 3.3+ 105 | for i in get_tests(item): 106 | yield i 107 | except TypeError: 108 | yield item 109 | 110 | 111 | class TestLoader(unittest.TestLoader): 112 | """A customzied test loader that properly detects and filters our test cases 113 | """ 114 | 115 | def loadTestsFromName(self, pattern, module=None): 116 | suite = self.discover('attic.testsuite', '*.py') 117 | tests = unittest.TestSuite() 118 | for test in get_tests(suite): 119 | if pattern.lower() in test.id().lower(): 120 | tests.addTest(test) 121 | return tests 122 | 123 | 124 | -------------------------------------------------------------------------------- /attic/testsuite/archive.py: -------------------------------------------------------------------------------- 1 | import msgpack 2 | from attic.testsuite import AtticTestCase 3 | from attic.archive import CacheChunkBuffer, RobustUnpacker 4 | from attic.key import PlaintextKey 5 | 6 | 7 | class MockCache: 8 | 9 | def __init__(self): 10 | self.objects = {} 11 | 12 | def add_chunk(self, id, data, stats=None): 13 | self.objects[id] = data 14 | return id, len(data), len(data) 15 | 16 | 17 | class ChunkBufferTestCase(AtticTestCase): 18 | 19 | def test(self): 20 | data = [{b'foo': 1}, {b'bar': 2}] 21 | cache = MockCache() 22 | key = PlaintextKey() 23 | chunks = CacheChunkBuffer(cache, key, None) 24 | for d in data: 25 | chunks.add(d) 26 | chunks.flush() 27 | chunks.flush(flush=True) 28 | self.assert_equal(len(chunks.chunks), 2) 29 | unpacker = msgpack.Unpacker() 30 | for id in chunks.chunks: 31 | unpacker.feed(cache.objects[id]) 32 | self.assert_equal(data, list(unpacker)) 33 | 34 | 35 | class RobustUnpackerTestCase(AtticTestCase): 36 | 37 | def make_chunks(self, items): 38 | return b''.join(msgpack.packb({'path': item}) for item in items) 39 | 40 | def _validator(self, value): 41 | return isinstance(value, dict) and value.get(b'path') in (b'foo', b'bar', b'boo', b'baz') 42 | 43 | def process(self, input): 44 | unpacker = 
RobustUnpacker(validator=self._validator) 45 | result = [] 46 | for should_sync, chunks in input: 47 | if should_sync: 48 | unpacker.resync() 49 | for data in chunks: 50 | unpacker.feed(data) 51 | for item in unpacker: 52 | result.append(item) 53 | return result 54 | 55 | def test_extra_garbage_no_sync(self): 56 | chunks = [(False, [self.make_chunks([b'foo', b'bar'])]), 57 | (False, [b'garbage'] + [self.make_chunks([b'boo', b'baz'])])] 58 | result = self.process(chunks) 59 | self.assert_equal(result, [ 60 | {b'path': b'foo'}, {b'path': b'bar'}, 61 | 103, 97, 114, 98, 97, 103, 101, 62 | {b'path': b'boo'}, 63 | {b'path': b'baz'}]) 64 | 65 | def split(self, left, length): 66 | parts = [] 67 | while left: 68 | parts.append(left[:length]) 69 | left = left[length:] 70 | return parts 71 | 72 | def test_correct_stream(self): 73 | chunks = self.split(self.make_chunks([b'foo', b'bar', b'boo', b'baz']), 2) 74 | input = [(False, chunks)] 75 | result = self.process(input) 76 | self.assert_equal(result, [{b'path': b'foo'}, {b'path': b'bar'}, {b'path': b'boo'}, {b'path': b'baz'}]) 77 | 78 | def test_missing_chunk(self): 79 | chunks = self.split(self.make_chunks([b'foo', b'bar', b'boo', b'baz']), 4) 80 | input = [(False, chunks[:3]), (True, chunks[4:])] 81 | result = self.process(input) 82 | self.assert_equal(result, [{b'path': b'foo'}, {b'path': b'boo'}, {b'path': b'baz'}]) 83 | 84 | def test_corrupt_chunk(self): 85 | chunks = self.split(self.make_chunks([b'foo', b'bar', b'boo', b'baz']), 4) 86 | input = [(False, chunks[:3]), (True, [b'gar', b'bage'] + chunks[3:])] 87 | result = self.process(input) 88 | self.assert_equal(result, [{b'path': b'foo'}, {b'path': b'boo'}, {b'path': b'baz'}]) 89 | -------------------------------------------------------------------------------- /attic/testsuite/chunker.py: -------------------------------------------------------------------------------- 1 | from attic.chunker import Chunker, buzhash, buzhash_update 2 | from attic.testsuite import 
class ChunkerTestCase(AtticTestCase):
    """Golden-value tests for the buzhash-based content-defined chunker.

    Do not change the expected values: they pin the C extension's output.
    """

    def test_chunkify(self):
        # Chunker args are (window_size, chunk_mask, min_size, seed).
        data = b'0' * 1024 * 1024 * 15 + b'Y'
        parts = [bytes(c) for c in Chunker(2, 0x3, 2, 0).chunkify(BytesIO(data))]
        self.assert_equal(len(parts), 2)
        self.assert_equal(b''.join(parts), data)
        self.assert_equal([bytes(c) for c in Chunker(2, 0x3, 2, 0).chunkify(BytesIO(b''))], [])
        self.assert_equal([bytes(c) for c in Chunker(2, 0x3, 2, 0).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'fooba', b'rboobaz', b'fooba', b'rboobaz', b'fooba', b'rboobaz'])
        self.assert_equal([bytes(c) for c in Chunker(2, 0x3, 2, 1).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'fo', b'obarb', b'oob', b'azf', b'oobarb', b'oob', b'azf', b'oobarb', b'oobaz'])
        self.assert_equal([bytes(c) for c in Chunker(2, 0x3, 2, 2).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foob', b'ar', b'boobazfoob', b'ar', b'boobazfoob', b'ar', b'boobaz'])
        self.assert_equal([bytes(c) for c in Chunker(3, 0x3, 3, 0).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobarboobaz' * 3])
        self.assert_equal([bytes(c) for c in Chunker(3, 0x3, 3, 1).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobar', b'boo', b'bazfo', b'obar', b'boo', b'bazfo', b'obar', b'boobaz'])
        self.assert_equal([bytes(c) for c in Chunker(3, 0x3, 3, 2).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foo', b'barboobaz', b'foo', b'barboobaz', b'foo', b'barboobaz'])
        self.assert_equal([bytes(c) for c in Chunker(3, 0x3, 4, 0).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobarboobaz' * 3])
        self.assert_equal([bytes(c) for c in Chunker(3, 0x3, 4, 1).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foobar', b'boobazfo', b'obar', b'boobazfo', b'obar', b'boobaz'])
        self.assert_equal([bytes(c) for c in Chunker(3, 0x3, 4, 2).chunkify(BytesIO(b'foobarboobaz' * 3))], [b'foob', b'arboobaz', b'foob', b'arboobaz', b'foob', b'arboobaz'])

    def test_buzhash(self):
        self.assert_equal(buzhash(b'abcdefghijklmnop', 0), 3795437769)
        self.assert_equal(buzhash(b'abcdefghijklmnop', 1), 3795400502)
        # Rolling update must equal a fresh hash over the shifted window.
        self.assert_equal(buzhash(b'abcdefghijklmnop', 1), buzhash_update(buzhash(b'Xabcdefghijklmno', 1), ord('X'), ord('p'), 16, 1))
        # Test with more than 31 bytes to make sure our barrel_shift macro works correctly
        self.assert_equal(buzhash(b'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz', 0), 566521248)


from binascii import hexlify
from attic.testsuite import AtticTestCase
from attic.crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes, pbkdf2_sha256, get_random_bytes


class CryptoTestCase(AtticTestCase):
    """Golden-value tests for the crypto primitives (PBKDF2, AES-CTR)."""

    def test_bytes_to_int(self):
        self.assert_equal(bytes_to_int(b'\0\0\0\1'), 1)

    def test_bytes_to_long(self):
        self.assert_equal(bytes_to_long(b'\0\0\0\0\0\0\0\1'), 1)
        self.assert_equal(long_to_bytes(1), b'\0\0\0\0\0\0\0\1')

    def test_pbkdf2_sha256(self):
        # RFC-style known-answer vectors for PBKDF2-HMAC-SHA256.
        self.assert_equal(hexlify(pbkdf2_sha256(b'password', b'salt', 1, 32)),
                          b'120fb6cffcf8b32c43e7225256c4f837a86548c92ccc35480805987cb70be17b')
        self.assert_equal(hexlify(pbkdf2_sha256(b'password', b'salt', 2, 32)),
                          b'ae4d0c95af6b46d32d0adff928f06dd02a303f8ef3c251dfd6e2d85a95474c43')
        self.assert_equal(hexlify(pbkdf2_sha256(b'password', b'salt', 4096, 32)),
                          b'c5e478d59288c841aa530db6845c4c8d962893a001ce4e11a4963873aa98134a')

    def test_get_random_bytes(self):
        bytes = get_random_bytes(10)
        bytes2 = get_random_bytes(10)
        self.assert_equal(len(bytes), 10)
        self.assert_equal(len(bytes2), 10)
        self.assert_not_equal(bytes, bytes2)

    def test_aes(self):
        key = b'X' * 32
        data = b'foo' * 10
        aes = AES(key)
        self.assert_equal(bytes_to_long(aes.iv, 8), 0)
        cdata = aes.encrypt(data)
        self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')
        # 30 bytes of CTR-mode data advance the counter by 2 blocks.
        self.assert_equal(bytes_to_long(aes.iv, 8), 2)
        # Decrypting with an advanced counter must NOT round-trip...
        self.assert_not_equal(data, aes.decrypt(cdata))
        # ...until the IV is reset to its starting value.
        aes.reset(iv=b'\0' * 16)
        self.assert_equal(data, aes.decrypt(cdata))
import hashlib
import os
import tempfile
from attic.hashindex import NSIndex, ChunkIndex
from attic.testsuite import AtticTestCase


class HashIndexTestCase(AtticTestCase):
    """Tests for the C hashindex: the SHA-256 digests pin the exact
    on-disk file format; they must not change across platforms."""

    def _generic_test(self, cls, make_value, sha):
        # Exercises set/update/delete plus serialization for either
        # index class; *make_value* builds the per-class value tuple.
        idx = cls()
        self.assert_equal(len(idx), 0)
        # Test set
        for x in range(100):
            idx[bytes('%-32d' % x, 'ascii')] = make_value(x)
        self.assert_equal(len(idx), 100)
        for x in range(100):
            self.assert_equal(idx[bytes('%-32d' % x, 'ascii')], make_value(x))
        # Test update
        for x in range(100):
            idx[bytes('%-32d' % x, 'ascii')] = make_value(x * 2)
        self.assert_equal(len(idx), 100)
        for x in range(100):
            self.assert_equal(idx[bytes('%-32d' % x, 'ascii')], make_value(x * 2))
        # Test delete
        for x in range(50):
            del idx[bytes('%-32d' % x, 'ascii')]
        self.assert_equal(len(idx), 50)
        idx_name = tempfile.NamedTemporaryFile()
        idx.write(idx_name.name)
        del idx
        # Verify file contents
        with open(idx_name.name, 'rb') as fd:
            self.assert_equal(hashlib.sha256(fd.read()).hexdigest(), sha)
        # Make sure we can open the file
        idx = cls.read(idx_name.name)
        self.assert_equal(len(idx), 50)
        for x in range(50, 100):
            self.assert_equal(idx[bytes('%-32d' % x, 'ascii')], make_value(x * 2))
        idx.clear()
        self.assert_equal(len(idx), 0)
        idx.write(idx_name.name)
        del idx
        self.assert_equal(len(cls.read(idx_name.name)), 0)

    def test_nsindex(self):
        self._generic_test(NSIndex, lambda x: (x, x), '369a18ae6a52524eb2884a3c0fdc2824947edd017a2688c5d4d7b3510c245ab9')

    def test_chunkindex(self):
        self._generic_test(ChunkIndex, lambda x: (x, x, x), 'ed22e8a883400453c0ee79a06c54df72c994a54eeefdc6c0989efdc5ee6d07b7')

    def test_resize(self):
        # The index file must grow when filled and shrink back to its
        # initial size once all entries are removed.
        n = 2000  # Must be >= MIN_BUCKETS
        idx_name = tempfile.NamedTemporaryFile()
        idx = NSIndex()
        idx.write(idx_name.name)
        initial_size = os.path.getsize(idx_name.name)
        self.assert_equal(len(idx), 0)
        for x in range(n):
            idx[bytes('%-32d' % x, 'ascii')] = x, x
        idx.write(idx_name.name)
        self.assert_true(initial_size < os.path.getsize(idx_name.name))
        for x in range(n):
            del idx[bytes('%-32d' % x, 'ascii')]
        self.assert_equal(len(idx), 0)
        idx.write(idx_name.name)
        self.assert_equal(initial_size, os.path.getsize(idx_name.name))

    def test_iteritems(self):
        # iteritems(marker=...) must resume exactly after the marker key.
        idx = NSIndex()
        for x in range(100):
            idx[bytes('%-0.32d' % x, 'ascii')] = x, x
        all = list(idx.iteritems())
        self.assert_equal(len(all), 100)
        second_half = list(idx.iteritems(marker=all[49][0]))
        self.assert_equal(len(second_half), 50)
        self.assert_equal(second_half, all[50:])
import msgpack


class BigIntTestCase(AtticTestCase):
    """int_to_bigint switches to a bytes encoding beyond 64-bit range."""

    def test_bigint(self):
        self.assert_equal(int_to_bigint(0), 0)
        self.assert_equal(int_to_bigint(2**63-1), 2**63-1)
        self.assert_equal(int_to_bigint(-2**63+1), -2**63+1)
        self.assert_equal(int_to_bigint(2**63), b'\x00\x00\x00\x00\x00\x00\x00\x80\x00')
        self.assert_equal(int_to_bigint(-2**63), b'\x00\x00\x00\x00\x00\x00\x00\x80\xff')
        self.assert_equal(bigint_to_int(int_to_bigint(-2**70)), -2**70)
        self.assert_equal(bigint_to_int(int_to_bigint(2**70)), 2**70)


class LocationTestCase(AtticTestCase):
    """Parsing of repository location strings (ssh/file/scp-style)."""

    def test(self):
        self.assert_equal(
            repr(Location('ssh://user@host:1234/some/path::archive')),
            "Location(proto='ssh', user='user', host='host', port=1234, path='/some/path', archive='archive')"
        )
        self.assert_equal(
            repr(Location('file:///some/path::archive')),
            "Location(proto='file', user=None, host=None, port=None, path='/some/path', archive='archive')"
        )
        self.assert_equal(
            repr(Location('user@host:/some/path::archive')),
            "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive='archive')"
        )
        self.assert_equal(
            repr(Location('mybackup.attic::archive')),
            "Location(proto='file', user=None, host=None, port=None, path='mybackup.attic', archive='archive')"
        )
        self.assert_equal(
            repr(Location('/some/absolute/path::archive')),
            "Location(proto='file', user=None, host=None, port=None, path='/some/absolute/path', archive='archive')"
        )
        self.assert_equal(
            repr(Location('some/relative/path::archive')),
            "Location(proto='file', user=None, host=None, port=None, path='some/relative/path', archive='archive')"
        )
        # Single ':' before the archive name is invalid (must be '::').
        self.assert_raises(ValueError, lambda: Location('ssh://localhost:22/path:archive'))

    def test_canonical_path(self):
        # canonical_path() must be a fixed point: canonicalizing twice
        # yields the same string.
        locations = ['some/path::archive', 'file://some/path::archive', 'host:some/path::archive',
                     'host:~user/some/path::archive', 'ssh://host/some/path::archive',
                     'ssh://user@host:1234/some/path::archive']
        for location in locations:
            self.assert_equal(Location(location).canonical_path(),
                              Location(Location(location).canonical_path()).canonical_path())


class FormatTimedeltaTestCase(AtticTestCase):

    def test(self):
        t0 = datetime(2001, 1, 1, 10, 20, 3, 0)
        t1 = datetime(2001, 1, 1, 12, 20, 4, 100000)
        self.assert_equal(
            format_timedelta(t1 - t0),
            '2 hours 1.10 seconds'
        )


class PatternTestCase(AtticTestCase):
    """Include/exclude pattern evaluation against a fixed file list."""

    files = [
        '/etc/passwd', '/etc/hosts', '/home',
        '/home/user/.profile', '/home/user/.bashrc',
        '/home/user2/.profile', '/home/user2/public_html/index.html',
        '/var/log/messages', '/var/log/dmesg',
    ]

    def evaluate(self, paths, excludes):
        # Returns the subset of self.files kept by the given
        # include paths and exclude patterns.
        patterns = adjust_patterns(paths, [ExcludePattern(p) for p in excludes])
        return [path for path in self.files if not exclude_path(path, patterns)]

    def test(self):
        self.assert_equal(self.evaluate(['/'], []), self.files)
        self.assert_equal(self.evaluate([], []), self.files)
        self.assert_equal(self.evaluate(['/'], ['/h']), self.files)
        self.assert_equal(self.evaluate(['/'], ['/home']),
                          ['/etc/passwd', '/etc/hosts', '/var/log/messages', '/var/log/dmesg'])
        self.assert_equal(self.evaluate(['/'], ['/home/']),
                          ['/etc/passwd', '/etc/hosts', '/home', '/var/log/messages', '/var/log/dmesg'])
        self.assert_equal(self.evaluate(['/home/u'], []), [])
        self.assert_equal(self.evaluate(['/', '/home', '/etc/hosts'], ['/']), [])
        self.assert_equal(self.evaluate(['/home/'], ['/home/user2']),
                          ['/home', '/home/user/.profile', '/home/user/.bashrc'])
        self.assert_equal(self.evaluate(['/'], ['*.profile', '/var/log']),
                          ['/etc/passwd', '/etc/hosts', '/home', '/home/user/.bashrc', '/home/user2/public_html/index.html'])
        self.assert_equal(self.evaluate(['/'], ['/home/*/public_html', '*.profile', '*/log/*']),
                          ['/etc/passwd', '/etc/hosts', '/home', '/home/user/.bashrc'])
        self.assert_equal(self.evaluate(['/etc/', '/var'], ['dmesg']),
                          ['/etc/passwd', '/etc/hosts', '/var/log/messages', '/var/log/dmesg'])


class MakePathSafeTestCase(AtticTestCase):

    def test(self):
        # Leading slashes and '..' components are stripped; '/' maps to '.'.
        self.assert_equal(make_path_safe('/foo/bar'), 'foo/bar')
        self.assert_equal(make_path_safe('/foo/bar'), 'foo/bar')
        self.assert_equal(make_path_safe('/f/bar'), 'f/bar')
        self.assert_equal(make_path_safe('fo/bar'), 'fo/bar')
        self.assert_equal(make_path_safe('../foo/bar'), 'foo/bar')
        self.assert_equal(make_path_safe('../../foo/bar'), 'foo/bar')
        self.assert_equal(make_path_safe('/'), '.')
        self.assert_equal(make_path_safe('/'), '.')

class UpgradableLockTestCase(AtticTestCase):

    def test(self):
        # upgrade() must be idempotent.
        file = tempfile.NamedTemporaryFile()
        lock = UpgradableLock(file.name)
        lock.upgrade()
        lock.upgrade()
        lock.release()

    @unittest.skipIf(os.getuid() == 0, 'Root can always open files for writing')
    def test_read_only_lock_file(self):
        file = tempfile.NamedTemporaryFile()
        os.chmod(file.name, 0o444)
        lock = UpgradableLock(file.name)
        self.assert_raises(UpgradableLock.WriteLockFailed, lock.upgrade)
        lock.release()


class MockArchive(object):
    """Minimal archive stand-in carrying only a timestamp."""

    def __init__(self, ts):
        self.ts = ts

    def __repr__(self):
        return repr(self.ts)


class PruneSplitTestCase(AtticTestCase):

    def test(self):

        def local_to_UTC(month, day):
            'Convert noon on the month and day in 2013 to UTC.'
            seconds = mktime(strptime('2013-%02d-%02d 12:00' % (month, day), '%Y-%m-%d %H:%M'))
            return datetime.fromtimestamp(seconds, tz=timezone.utc)

        def subset(lst, indices):
            return {lst[i] for i in indices}

        def dotest(test_archives, n, skip, indices):
            # Result must not depend on input ordering.
            for ta in test_archives, reversed(test_archives):
                self.assert_equal(set(prune_split(ta, '%Y-%m', n, skip)),
                                  subset(test_archives, indices))

        test_pairs = [(1,1), (2,1), (2,28), (3,1), (3,2), (3,31), (5,1)]
        test_dates = [local_to_UTC(month, day) for month, day in test_pairs]
        test_archives = [MockArchive(date) for date in test_dates]

        dotest(test_archives, 3, [], [6, 5, 2])
        dotest(test_archives, -1, [], [6, 5, 2, 0])
        dotest(test_archives, 3, [test_archives[6]], [5, 2, 0])
        dotest(test_archives, 3, [test_archives[5]], [6, 2, 0])
        dotest(test_archives, 3, [test_archives[4]], [6, 5, 2])
        dotest(test_archives, 0, [], [])


class PruneWithinTestCase(AtticTestCase):

    def test(self):

        def subset(lst, indices):
            return {lst[i] for i in indices}

        def dotest(test_archives, within, indices):
            for ta in test_archives, reversed(test_archives):
                self.assert_equal(set(prune_within(ta, within)),
                                  subset(test_archives, indices))

        # 1 minute, 1.5 hours, 2.5 hours, 3.5 hours, 25 hours, 49 hours
        test_offsets = [60, 90*60, 150*60, 210*60, 25*60*60, 49*60*60]
        now = datetime.now(timezone.utc)
        test_dates = [now - timedelta(seconds=s) for s in test_offsets]
        test_archives = [MockArchive(date) for date in test_dates]

        dotest(test_archives, '1H', [0])
        dotest(test_archives, '2H', [0, 1])
        dotest(test_archives, '3H', [0, 1, 2])
        dotest(test_archives, '24H', [0, 1, 2, 3])
        dotest(test_archives, '26H', [0, 1, 2, 3, 4])
        dotest(test_archives, '2d', [0, 1, 2, 3, 4])
        dotest(test_archives, '50H', [0, 1, 2, 3, 4, 5])
        dotest(test_archives, '3d', [0, 1, 2, 3, 4, 5])
        dotest(test_archives, '1w', [0, 1, 2, 3, 4, 5])
        dotest(test_archives, '1m', [0, 1, 2, 3, 4, 5])
        dotest(test_archives, '1y', [0, 1, 2, 3, 4, 5])
1, 2, 3, 4, 5]) 200 | dotest(test_archives, '3d', [0, 1, 2, 3, 4, 5]) 201 | dotest(test_archives, '1w', [0, 1, 2, 3, 4, 5]) 202 | dotest(test_archives, '1m', [0, 1, 2, 3, 4, 5]) 203 | dotest(test_archives, '1y', [0, 1, 2, 3, 4, 5]) 204 | 205 | 206 | class StableDictTestCase(AtticTestCase): 207 | 208 | def test(self): 209 | d = StableDict(foo=1, bar=2, boo=3, baz=4) 210 | self.assert_equal(list(d.items()), [('bar', 2), ('baz', 4), ('boo', 3), ('foo', 1)]) 211 | self.assert_equal(hashlib.md5(msgpack.packb(d)).hexdigest(), 'fc78df42cd60691b3ac3dd2a2b39903f') 212 | 213 | 214 | class TestParseTimestamp(AtticTestCase): 215 | 216 | def test(self): 217 | self.assert_equal(parse_timestamp('2015-04-19T20:25:00.226410'), datetime(2015, 4, 19, 20, 25, 0, 226410, timezone.utc)) 218 | self.assert_equal(parse_timestamp('2015-04-19T20:25:00'), datetime(2015, 4, 19, 20, 25, 0, 0, timezone.utc)) 219 | -------------------------------------------------------------------------------- /attic/testsuite/key.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import shutil 4 | import tempfile 5 | from binascii import hexlify 6 | from attic.crypto import bytes_to_long, num_aes_blocks 7 | from attic.testsuite import AtticTestCase 8 | from attic.key import PlaintextKey, PassphraseKey, KeyfileKey 9 | from attic.helpers import Location, unhexlify 10 | 11 | 12 | class KeyTestCase(AtticTestCase): 13 | 14 | class MockArgs(object): 15 | repository = Location(tempfile.mkstemp()[1]) 16 | 17 | keyfile2_key_file = """ 18 | ATTIC KEY 0000000000000000000000000000000000000000000000000000000000000000 19 | hqppdGVyYXRpb25zzgABhqCkaGFzaNoAIMyonNI+7Cjv0qHi0AOBM6bLGxACJhfgzVD2oq 20 | bIS9SFqWFsZ29yaXRobaZzaGEyNTakc2FsdNoAINNK5qqJc1JWSUjACwFEWGTdM7Nd0a5l 21 | 1uBGPEb+9XM9p3ZlcnNpb24BpGRhdGHaANAYDT5yfPpU099oBJwMomsxouKyx/OG4QIXK2 22 | hQCG2L2L/9PUu4WIuKvGrsXoP7syemujNfcZws5jLp2UPva4PkQhQsrF1RYDEMLh2eF9Ol 23 | 
rwtkThq1tnh7KjWMG9Ijt7/aoQtq0zDYP/xaFF8XXSJxiyP5zjH5+spB6RL0oQHvbsliSh 24 | /cXJq7jrqmrJ1phd6dg4SHAM/i+hubadZoS6m25OQzYAW09wZD/phG8OVa698Z5ed3HTaT 25 | SmrtgJL3EoOKgUI9d6BLE4dJdBqntifo""".strip() 26 | 27 | keyfile2_cdata = unhexlify(re.sub('\W', '', """ 28 | 0055f161493fcfc16276e8c31493c4641e1eb19a79d0326fad0291e5a9c98e5933 29 | 00000000000003e8d21eaf9b86c297a8cd56432e1915bb 30 | """)) 31 | keyfile2_id = unhexlify('c3fbf14bc001ebcc3cd86e696c13482ed071740927cd7cbe1b01b4bfcee49314') 32 | 33 | def setUp(self): 34 | self.tmppath = tempfile.mkdtemp() 35 | os.environ['ATTIC_KEYS_DIR'] = self.tmppath 36 | 37 | def tearDown(self): 38 | shutil.rmtree(self.tmppath) 39 | 40 | class MockRepository(object): 41 | class _Location(object): 42 | orig = '/some/place' 43 | 44 | _location = _Location() 45 | id = bytes(32) 46 | 47 | def test_plaintext(self): 48 | key = PlaintextKey.create(None, None) 49 | data = b'foo' 50 | self.assert_equal(hexlify(key.id_hash(data)), b'2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae') 51 | self.assert_equal(data, key.decrypt(key.id_hash(data), key.encrypt(data))) 52 | 53 | def test_keyfile(self): 54 | os.environ['ATTIC_PASSPHRASE'] = 'test' 55 | key = KeyfileKey.create(self.MockRepository(), self.MockArgs()) 56 | self.assert_equal(bytes_to_long(key.enc_cipher.iv, 8), 0) 57 | manifest = key.encrypt(b'XXX') 58 | self.assert_equal(key.extract_nonce(manifest), 0) 59 | manifest2 = key.encrypt(b'XXX') 60 | self.assert_not_equal(manifest, manifest2) 61 | self.assert_equal(key.decrypt(None, manifest), key.decrypt(None, manifest2)) 62 | self.assert_equal(key.extract_nonce(manifest2), 1) 63 | iv = key.extract_nonce(manifest) 64 | key2 = KeyfileKey.detect(self.MockRepository(), manifest) 65 | self.assert_equal(bytes_to_long(key2.enc_cipher.iv, 8), iv + num_aes_blocks(len(manifest) - KeyfileKey.PAYLOAD_OVERHEAD)) 66 | # Key data sanity check 67 | self.assert_equal(len(set([key2.id_key, key2.enc_key, key2.enc_hmac_key])), 3) 68 | 
self.assert_equal(key2.chunk_seed == 0, False) 69 | data = b'foo' 70 | self.assert_equal(data, key2.decrypt(key.id_hash(data), key.encrypt(data))) 71 | 72 | def test_keyfile2(self): 73 | with open(os.path.join(os.environ['ATTIC_KEYS_DIR'], 'keyfile'), 'w') as fd: 74 | fd.write(self.keyfile2_key_file) 75 | os.environ['ATTIC_PASSPHRASE'] = 'passphrase' 76 | key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata) 77 | self.assert_equal(key.decrypt(self.keyfile2_id, self.keyfile2_cdata), b'payload') 78 | 79 | def test_passphrase(self): 80 | os.environ['ATTIC_PASSPHRASE'] = 'test' 81 | key = PassphraseKey.create(self.MockRepository(), None) 82 | self.assert_equal(bytes_to_long(key.enc_cipher.iv, 8), 0) 83 | self.assert_equal(hexlify(key.id_key), b'793b0717f9d8fb01c751a487e9b827897ceea62409870600013fbc6b4d8d7ca6') 84 | self.assert_equal(hexlify(key.enc_hmac_key), b'b885a05d329a086627412a6142aaeb9f6c54ab7950f996dd65587251f6bc0901') 85 | self.assert_equal(hexlify(key.enc_key), b'2ff3654c6daf7381dbbe718d2b20b4f1ea1e34caa6cc65f6bb3ac376b93fed2a') 86 | self.assert_equal(key.chunk_seed, -775740477) 87 | manifest = key.encrypt(b'XXX') 88 | self.assert_equal(key.extract_nonce(manifest), 0) 89 | manifest2 = key.encrypt(b'XXX') 90 | self.assert_not_equal(manifest, manifest2) 91 | self.assert_equal(key.decrypt(None, manifest), key.decrypt(None, manifest2)) 92 | self.assert_equal(key.extract_nonce(manifest2), 1) 93 | iv = key.extract_nonce(manifest) 94 | key2 = PassphraseKey.detect(self.MockRepository(), manifest) 95 | self.assert_equal(bytes_to_long(key2.enc_cipher.iv, 8), iv + num_aes_blocks(len(manifest) - PassphraseKey.PAYLOAD_OVERHEAD)) 96 | self.assert_equal(key.id_key, key2.id_key) 97 | self.assert_equal(key.enc_hmac_key, key2.enc_hmac_key) 98 | self.assert_equal(key.enc_key, key2.enc_key) 99 | self.assert_equal(key.chunk_seed, key2.chunk_seed) 100 | data = b'foo' 101 | self.assert_equal(hexlify(key.id_hash(data)), 
class LRUCacheTestCase(AtticTestCase):
    """Tests for attic.lrucache.LRUCache (fixed-capacity LRU mapping)."""

    def test(self):
        # Capacity-2 cache: inserting a third key evicts the least
        # recently used entry.
        c = LRUCache(2)
        self.assert_equal(len(c), 0)
        for i, x in enumerate('abc'):
            c[x] = i
        # 'a' was evicted when 'c' was inserted.
        self.assert_equal(len(c), 2)
        self.assert_equal(set(c), set(['b', 'c']))
        self.assert_equal(set(c.items()), set([('b', 1), ('c', 2)]))
        self.assert_equal(False, 'a' in c)
        self.assert_equal(True, 'b' in c)
        self.assert_raises(KeyError, lambda: c['a'])
        # Reading 'b' then 'c' leaves 'b' as the least recently used ...
        self.assert_equal(c['b'], 1)
        self.assert_equal(c['c'], 2)
        # ... so inserting 'd' evicts 'b'.
        c['d'] = 3
        self.assert_equal(len(c), 2)
        self.assert_equal(c['c'], 2)
        self.assert_equal(c['d'], 3)
        # Overwriting 'c' refreshes its recency; inserting 'e' then evicts 'd'.
        c['c'] = 22
        c['e'] = 4
        self.assert_equal(len(c), 2)
        self.assert_raises(KeyError, lambda: c['d'])
        self.assert_equal(c['c'], 22)
        self.assert_equal(c['e'], 4)
        # Explicit deletion shrinks the cache and removes the key.
        del c['c']
        self.assert_equal(len(c), 1)
        self.assert_raises(KeyError, lambda: c['c'])
        self.assert_equal(c['e'], 4)

    def test_pop(self):
        # pop() followed by another insert must not break the cache's
        # internal bookkeeping (this test passes if no exception is raised).
        c = LRUCache(2)
        c[1] = 1
        c[2] = 2
        c.pop(1)
        c[3] = 3
def fakeroot_detected():
    """Return True when running under fakeroot.

    fakeroot exports FAKEROOTKEY into the environment; the ACL tests are
    skipped under it (see the skipIf decorators below).
    """
    return os.environ.get('FAKEROOTKEY') is not None
numeric_owner=True)[b'acl_access']) 63 | file2 = tempfile.NamedTemporaryFile() 64 | self.set_acl(file2.name, access=b'user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n', numeric_owner=True) 65 | self.assert_in(b'user:9999:rw-:9999', self.get_acl(file2.name)[b'acl_access']) 66 | self.assert_in(b'group:9999:rw-:9999', self.get_acl(file2.name)[b'acl_access']) 67 | 68 | def test_default_acl(self): 69 | self.assert_equal(self.get_acl(self.tmpdir), {}) 70 | self.set_acl(self.tmpdir, access=ACCESS_ACL, default=DEFAULT_ACL) 71 | self.assert_equal(self.get_acl(self.tmpdir)[b'acl_access'], ACCESS_ACL) 72 | self.assert_equal(self.get_acl(self.tmpdir)[b'acl_default'], DEFAULT_ACL) 73 | 74 | 75 | @unittest.skipUnless(sys.platform.startswith('darwin'), 'OS X only test') 76 | @unittest.skipIf(fakeroot_detected(), 'not compatible with fakeroot') 77 | class PlatformDarwinTestCase(AtticTestCase): 78 | 79 | def setUp(self): 80 | self.tmpdir = tempfile.mkdtemp() 81 | 82 | def tearDown(self): 83 | shutil.rmtree(self.tmpdir) 84 | 85 | def get_acl(self, path, numeric_owner=False): 86 | item = {} 87 | acl_get(path, item, os.stat(path), numeric_owner=numeric_owner) 88 | return item 89 | 90 | def set_acl(self, path, acl, numeric_owner=False): 91 | item = {b'acl_extended': acl} 92 | acl_set(path, item, numeric_owner=numeric_owner) 93 | 94 | def test_access_acl(self): 95 | file = tempfile.NamedTemporaryFile() 96 | file2 = tempfile.NamedTemporaryFile() 97 | self.assert_equal(self.get_acl(file.name), {}) 98 | self.set_acl(file.name, b'!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n', numeric_owner=False) 99 | self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000014:staff:20:allow:read', self.get_acl(file.name)[b'acl_extended']) 100 | self.assert_in(b'user:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read', self.get_acl(file.name)[b'acl_extended']) 101 | 
class RepositoryTestCaseBase(AtticTestCase):
    """Fixture base class: each test gets a fresh on-disk repository in a
    temporary directory, closed and removed on teardown."""

    def open(self, create=False):
        # Overridden by the Remote* test cases to open the same path through
        # RemoteRepository instead.
        return Repository(os.path.join(self.tmppath, 'repository'), create=create)

    def setUp(self):
        self.tmppath = tempfile.mkdtemp()
        self.repository = self.open(create=True)

    def tearDown(self):
        self.repository.close()
        shutil.rmtree(self.tmppath)

    def reopen(self):
        # Close and reopen the repository, simulating a fresh session so
        # on-disk state (indexes, segments) is re-read.
        if self.repository:
            self.repository.close()
        self.repository = self.open()
= self.open() 43 | self.assert_raises(Repository.ObjectNotFound, lambda: repository2.get(key50)) 44 | for x in range(100): 45 | if x == 50: 46 | continue 47 | self.assert_equal(repository2.get(('%-32d' % x).encode('ascii')), b'SOMEDATA') 48 | repository2.close() 49 | 50 | def test2(self): 51 | """Test multiple sequential transactions 52 | """ 53 | self.repository.put(b'00000000000000000000000000000000', b'foo') 54 | self.repository.put(b'00000000000000000000000000000001', b'foo') 55 | self.repository.commit() 56 | self.repository.delete(b'00000000000000000000000000000000') 57 | self.repository.put(b'00000000000000000000000000000001', b'bar') 58 | self.repository.commit() 59 | self.assert_equal(self.repository.get(b'00000000000000000000000000000001'), b'bar') 60 | 61 | def test_consistency(self): 62 | """Test cache consistency 63 | """ 64 | self.repository.put(b'00000000000000000000000000000000', b'foo') 65 | self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo') 66 | self.repository.put(b'00000000000000000000000000000000', b'foo2') 67 | self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo2') 68 | self.repository.put(b'00000000000000000000000000000000', b'bar') 69 | self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'bar') 70 | self.repository.delete(b'00000000000000000000000000000000') 71 | self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(b'00000000000000000000000000000000')) 72 | 73 | def test_consistency2(self): 74 | """Test cache consistency2 75 | """ 76 | self.repository.put(b'00000000000000000000000000000000', b'foo') 77 | self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo') 78 | self.repository.commit() 79 | self.repository.put(b'00000000000000000000000000000000', b'foo2') 80 | self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo2') 81 | self.repository.rollback() 82 | 
self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo') 83 | 84 | def test_overwrite_in_same_transaction(self): 85 | """Test cache consistency2 86 | """ 87 | self.repository.put(b'00000000000000000000000000000000', b'foo') 88 | self.repository.put(b'00000000000000000000000000000000', b'foo2') 89 | self.repository.commit() 90 | self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo2') 91 | 92 | def test_single_kind_transactions(self): 93 | # put 94 | self.repository.put(b'00000000000000000000000000000000', b'foo') 95 | self.repository.commit() 96 | self.repository.close() 97 | # replace 98 | self.repository = self.open() 99 | self.repository.put(b'00000000000000000000000000000000', b'bar') 100 | self.repository.commit() 101 | self.repository.close() 102 | # delete 103 | self.repository = self.open() 104 | self.repository.delete(b'00000000000000000000000000000000') 105 | self.repository.commit() 106 | 107 | def test_list(self): 108 | for x in range(100): 109 | self.repository.put(('%-32d' % x).encode('ascii'), b'SOMEDATA') 110 | all = self.repository.list() 111 | self.assert_equal(len(all), 100) 112 | first_half = self.repository.list(limit=50) 113 | self.assert_equal(len(first_half), 50) 114 | self.assert_equal(first_half, all[:50]) 115 | second_half = self.repository.list(marker=first_half[-1]) 116 | self.assert_equal(len(second_half), 50) 117 | self.assert_equal(second_half, all[50:]) 118 | self.assert_equal(len(self.repository.list(limit=50)), 50) 119 | 120 | 121 | class RepositoryCommitTestCase(RepositoryTestCaseBase): 122 | 123 | def add_keys(self): 124 | self.repository.put(b'00000000000000000000000000000000', b'foo') 125 | self.repository.put(b'00000000000000000000000000000001', b'bar') 126 | self.repository.put(b'00000000000000000000000000000003', b'bar') 127 | self.repository.commit() 128 | self.repository.put(b'00000000000000000000000000000001', b'bar2') 129 | 
    def test_replay_of_missing_index(self):
        # Deleting all index files forces the repository to rebuild its index
        # by replaying the segment files on the next open.
        self.add_keys()
        for name in os.listdir(self.repository.path):
            if name.startswith('index.'):
                os.unlink(os.path.join(self.repository.path, name))
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)

    def test_crash_before_compact_segments(self):
        # Simulate a crash inside commit() right before compact_segments()
        # by clobbering the method: calling None raises TypeError.
        self.add_keys()
        self.repository.compact_segments = None
        try:
            self.repository.commit()
        except TypeError:
            pass
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)

    def test_replay_of_readonly_repository(self):
        # If the index replay needs a write lock but cannot get one, opening
        # the repository must surface WriteLockFailed to the caller.
        self.add_keys()
        for name in os.listdir(self.repository.path):
            if name.startswith('index.'):
                os.unlink(os.path.join(self.repository.path, name))
        with patch.object(UpgradableLock, 'upgrade', side_effect=UpgradableLock.WriteLockFailed) as upgrade:
            self.reopen()
            self.assert_raises(UpgradableLock.WriteLockFailed, lambda: len(self.repository))
            upgrade.assert_called_once()

    def test_crash_before_write_index(self):
        # Same crash-simulation pattern, but failing before the new index
        # is written to disk.
        self.add_keys()
        self.repository.write_index = None
        try:
            self.repository.commit()
        except TypeError:
            pass
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)

    def test_crash_before_deleting_compacted_segments(self):
        # Same pattern, failing before compacted segment files are deleted.
        self.add_keys()
        self.repository.io.delete_segment = None
        try:
            self.repository.commit()
        except TypeError:
            pass
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)
        self.assert_equal(len(self.repository), 3)
    def list_indices(self):
        # Index files in the repository dir are named 'index.<N>'.
        return [name for name in os.listdir(os.path.join(self.tmppath, 'repository')) if name.startswith('index.')]

    def check(self, repair=False, status=True):
        # Run repository.check() and assert its result, then make sure the
        # check left no temporary files behind.
        self.assert_equal(self.repository.check(repair=repair), status)
        # Make sure no tmp files are left behind
        self.assert_equal([name for name in os.listdir(os.path.join(self.tmppath, 'repository')) if 'tmp' in name], [], 'Found tmp files')

    def get_objects(self, *ids):
        # Fetch objects by numeric id; get() raises if any is missing/corrupt.
        for id_ in ids:
            self.repository.get(('%032d' % id_).encode('ascii'))

    def add_objects(self, segments):
        # Each inner id list is stored and committed as one transaction.
        for ids in segments:
            for id_ in ids:
                self.repository.put(('%032d' % id_).encode('ascii'), b'data')
            self.repository.commit()

    def get_head(self):
        # Highest numbered segment file under data/0.
        return sorted(int(n) for n in os.listdir(os.path.join(self.tmppath, 'repository', 'data', '0')) if n.isdigit())[-1]

    def open_index(self):
        return NSIndex.read(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())))

    def corrupt_object(self, id_):
        # Overwrite the object's on-disk bytes with garbage at the exact
        # (segment, offset) recorded in the index.
        idx = self.open_index()
        segment, offset = idx[('%032d' % id_).encode('ascii')]
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', str(segment)), 'r+b') as fd:
            fd.seek(offset)
            fd.write(b'BOOM')

    def delete_segment(self, segment):
        os.unlink(os.path.join(self.tmppath, 'repository', 'data', '0', str(segment)))

    def delete_index(self):
        os.unlink(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())))

    def rename_index(self, new_name):
        os.rename(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())),
                  os.path.join(self.tmppath, 'repository', new_name))

    def list_objects(self):
        # All stored keys, decoded back to their integer ids.
        return set(int(key) for key in self.repository.list())
    def test_repair_missing_segment(self):
        # Losing a whole committed segment: repair keeps the objects from the
        # surviving transaction(s) and drops the rest.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
        self.check(status=True)
        self.delete_segment(1)
        self.repository.rollback()
        self.check(repair=True, status=True)
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_missing_commit_segment(self):
        # Without its commit segment, the second transaction is treated as
        # never having happened.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.delete_segment(1)
        self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_corrupted_commit_segment(self):
        # Corrupting the tail of the second segment invalidates that
        # transaction; only the first transaction's objects remain visible.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', '1'), 'r+b') as fd:
            fd.seek(-1, os.SEEK_END)
            fd.write(b'X')
        self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
        self.check(status=True)
        self.get_objects(3)
        self.assert_equal(set([1, 2, 3]), self.list_objects())
    def test_repair_missing_index(self):
        # With no index file at all, check() replays the segments and
        # succeeds without losing any objects.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.delete_index()
        self.check(status=True)
        self.get_objects(4)
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())

    def test_repair_index_too_new(self):
        # An index claiming a newer transaction id than the segments support
        # is discarded and rebuilt under the correct id.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.assert_equal(self.list_indices(), ['index.1'])
        self.rename_index('index.100')
        self.check(status=True)
        self.assert_equal(self.list_indices(), ['index.1'])
        self.get_objects(4)
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())

    def test_crash_before_compact(self):
        # Crash between writing the commit and compacting segments: after
        # repair, the latest value for the key must win.
        self.repository.put(bytes(32), b'data')
        self.repository.put(bytes(32), b'data2')
        # Simulate a crash before compact
        with patch.object(Repository, 'compact_segments') as compact:
            self.repository.commit()
            compact.assert_called_once()
        self.reopen()
        self.check(repair=True)
        self.assert_equal(self.repository.get(bytes(32)), b'data2')


class RemoteRepositoryTestCase(RepositoryTestCase):
    """Re-run RepositoryTestCase through RemoteRepository via the
    '__testsuite__' location."""

    def open(self, create=False):
        return RemoteRepository(Location('__testsuite__:' + os.path.join(self.tmppath, 'repository')), create=create)


class RemoteRepositoryCheckTestCase(RepositoryCheckTestCase):
    """Re-run RepositoryCheckTestCase through RemoteRepository."""

    def open(self, create=False):
        return RemoteRepository(Location('__testsuite__:' + os.path.join(self.tmppath, 'repository')), create=create)
def main():
    """Entry point for ``python -m attic.testsuite.run``.

    Uses the project's TestLoader (from attic.testsuite) for discovery;
    defaultTest='' makes unittest run the full suite.
    """
    unittest.main(testLoader=TestLoader(), defaultTest='')


if __name__ == '__main__':
    main()
'user.foo'), b'bar') 31 | self.assert_equal(getxattr(self.symlink, 'user.foo'), b'bar') 32 | self.assert_equal(getxattr(self.tmpfile.name, 'user.empty'), None) 33 | -------------------------------------------------------------------------------- /attic/xattr.py: -------------------------------------------------------------------------------- 1 | """A basic extended attributes (xattr) implementation for Linux and MacOS X 2 | """ 3 | import errno 4 | import os 5 | import sys 6 | import tempfile 7 | from ctypes import CDLL, create_string_buffer, c_ssize_t, c_size_t, c_char_p, c_int, c_uint32, get_errno 8 | from ctypes.util import find_library 9 | 10 | 11 | def is_enabled(): 12 | """Determine if xattr is enabled on the filesystem 13 | """ 14 | with tempfile.NamedTemporaryFile() as fd: 15 | try: 16 | setxattr(fd.fileno(), 'user.name', b'value') 17 | except OSError: 18 | return False 19 | return getxattr(fd.fileno(), 'user.name') == b'value' 20 | 21 | 22 | def get_all(path, follow_symlinks=True): 23 | try: 24 | return dict((name, getxattr(path, name, follow_symlinks=follow_symlinks)) 25 | for name in listxattr(path, follow_symlinks=follow_symlinks)) 26 | except OSError as e: 27 | if e.errno in (errno.ENOTSUP, errno.EPERM): 28 | return {} 29 | 30 | 31 | libc = CDLL(find_library('c'), use_errno=True) 32 | 33 | 34 | def _check(rv, path=None): 35 | if rv < 0: 36 | raise OSError(get_errno(), path) 37 | return rv 38 | 39 | if sys.platform.startswith('linux'): 40 | libc.llistxattr.argtypes = (c_char_p, c_char_p, c_size_t) 41 | libc.llistxattr.restype = c_ssize_t 42 | libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t) 43 | libc.flistxattr.restype = c_ssize_t 44 | libc.lsetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_int) 45 | libc.lsetxattr.restype = c_int 46 | libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_int) 47 | libc.fsetxattr.restype = c_int 48 | libc.lgetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t) 49 | 
libc.lgetxattr.restype = c_ssize_t 50 | libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t) 51 | libc.fgetxattr.restype = c_ssize_t 52 | 53 | def listxattr(path, *, follow_symlinks=True): 54 | if isinstance(path, str): 55 | path = os.fsencode(path) 56 | if isinstance(path, int): 57 | func = libc.flistxattr 58 | elif follow_symlinks: 59 | func = libc.listxattr 60 | else: 61 | func = libc.llistxattr 62 | n = _check(func(path, None, 0), path) 63 | if n == 0: 64 | return [] 65 | namebuf = create_string_buffer(n) 66 | n2 = _check(func(path, namebuf, n), path) 67 | if n2 != n: 68 | raise Exception('listxattr failed') 69 | return [os.fsdecode(name) for name in namebuf.raw.split(b'\0')[:-1] if not name.startswith(b'system.posix_acl_')] 70 | 71 | def getxattr(path, name, *, follow_symlinks=True): 72 | name = os.fsencode(name) 73 | if isinstance(path, str): 74 | path = os.fsencode(path) 75 | if isinstance(path, int): 76 | func = libc.fgetxattr 77 | elif follow_symlinks: 78 | func = libc.getxattr 79 | else: 80 | func = libc.lgetxattr 81 | n = _check(func(path, name, None, 0)) 82 | if n == 0: 83 | return 84 | valuebuf = create_string_buffer(n) 85 | n2 = _check(func(path, name, valuebuf, n), path) 86 | if n2 != n: 87 | raise Exception('getxattr failed') 88 | return valuebuf.raw 89 | 90 | def setxattr(path, name, value, *, follow_symlinks=True): 91 | name = os.fsencode(name) 92 | value = value and os.fsencode(value) 93 | if isinstance(path, str): 94 | path = os.fsencode(path) 95 | if isinstance(path, int): 96 | func = libc.fsetxattr 97 | elif follow_symlinks: 98 | func = libc.setxattr 99 | else: 100 | func = libc.lsetxattr 101 | _check(func(path, name, value, len(value) if value else 0, 0), path) 102 | 103 | elif sys.platform == 'darwin': 104 | libc.listxattr.argtypes = (c_char_p, c_char_p, c_size_t, c_int) 105 | libc.listxattr.restype = c_ssize_t 106 | libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t) 107 | libc.flistxattr.restype = c_ssize_t 108 | 
libc.setxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int) 109 | libc.setxattr.restype = c_int 110 | libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int) 111 | libc.fsetxattr.restype = c_int 112 | libc.getxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int) 113 | libc.getxattr.restype = c_ssize_t 114 | libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int) 115 | libc.fgetxattr.restype = c_ssize_t 116 | 117 | XATTR_NOFOLLOW = 0x0001 118 | 119 | def listxattr(path, *, follow_symlinks=True): 120 | func = libc.listxattr 121 | flags = 0 122 | if isinstance(path, str): 123 | path = os.fsencode(path) 124 | if isinstance(path, int): 125 | func = libc.flistxattr 126 | elif not follow_symlinks: 127 | flags = XATTR_NOFOLLOW 128 | n = _check(func(path, None, 0, flags), path) 129 | if n == 0: 130 | return [] 131 | namebuf = create_string_buffer(n) 132 | n2 = _check(func(path, namebuf, n, flags), path) 133 | if n2 != n: 134 | raise Exception('listxattr failed') 135 | return [os.fsdecode(name) for name in namebuf.raw.split(b'\0')[:-1]] 136 | 137 | def getxattr(path, name, *, follow_symlinks=True): 138 | name = os.fsencode(name) 139 | func = libc.getxattr 140 | flags = 0 141 | if isinstance(path, str): 142 | path = os.fsencode(path) 143 | if isinstance(path, int): 144 | func = libc.fgetxattr 145 | elif not follow_symlinks: 146 | flags = XATTR_NOFOLLOW 147 | n = _check(func(path, name, None, 0, 0, flags)) 148 | if n == 0: 149 | return 150 | valuebuf = create_string_buffer(n) 151 | n2 = _check(func(path, name, valuebuf, n, 0, flags), path) 152 | if n2 != n: 153 | raise Exception('getxattr failed') 154 | return valuebuf.raw 155 | 156 | def setxattr(path, name, value, *, follow_symlinks=True): 157 | name = os.fsencode(name) 158 | value = value and os.fsencode(value) 159 | func = libc.setxattr 160 | flags = 0 161 | if isinstance(path, str): 162 | path = os.fsencode(path) 163 | if 
isinstance(path, int): 164 | func = libc.fsetxattr 165 | elif not follow_symlinks: 166 | flags = XATTR_NOFOLLOW 167 | _check(func(path, name, value, len(value) if value else 0, 0, flags), path) 168 | 169 | elif sys.platform.startswith('freebsd'): 170 | EXTATTR_NAMESPACE_USER = 0x0001 171 | libc.extattr_list_fd.argtypes = (c_int, c_int, c_char_p, c_size_t) 172 | libc.extattr_list_fd.restype = c_ssize_t 173 | libc.extattr_list_link.argtypes = (c_char_p, c_int, c_char_p, c_size_t) 174 | libc.extattr_list_link.restype = c_ssize_t 175 | libc.extattr_list_file.argtypes = (c_char_p, c_int, c_char_p, c_size_t) 176 | libc.extattr_list_file.restype = c_ssize_t 177 | libc.extattr_get_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t) 178 | libc.extattr_get_fd.restype = c_ssize_t 179 | libc.extattr_get_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t) 180 | libc.extattr_get_link.restype = c_ssize_t 181 | libc.extattr_get_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t) 182 | libc.extattr_get_file.restype = c_ssize_t 183 | libc.extattr_set_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t) 184 | libc.extattr_set_fd.restype = c_int 185 | libc.extattr_set_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t) 186 | libc.extattr_set_link.restype = c_int 187 | libc.extattr_set_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t) 188 | libc.extattr_set_file.restype = c_int 189 | 190 | def listxattr(path, *, follow_symlinks=True): 191 | ns = EXTATTR_NAMESPACE_USER 192 | if isinstance(path, str): 193 | path = os.fsencode(path) 194 | if isinstance(path, int): 195 | func = libc.extattr_list_fd 196 | elif follow_symlinks: 197 | func = libc.extattr_list_file 198 | else: 199 | func = libc.extattr_list_link 200 | n = _check(func(path, ns, None, 0), path) 201 | if n == 0: 202 | return [] 203 | namebuf = create_string_buffer(n) 204 | n2 = _check(func(path, ns, namebuf, n), path) 205 | if n2 != n: 206 | raise Exception('listxattr 
failed') 207 | names = [] 208 | mv = memoryview(namebuf.raw) 209 | while mv: 210 | length = mv[0] 211 | # Python < 3.3 returns bytes instead of int 212 | if isinstance(length, bytes): 213 | length = ord(length) 214 | names.append(os.fsdecode(bytes(mv[1:1+length]))) 215 | mv = mv[1+length:] 216 | return names 217 | 218 | def getxattr(path, name, *, follow_symlinks=True): 219 | name = os.fsencode(name) 220 | if isinstance(path, str): 221 | path = os.fsencode(path) 222 | if isinstance(path, int): 223 | func = libc.extattr_get_fd 224 | elif follow_symlinks: 225 | func = libc.extattr_get_file 226 | else: 227 | func = libc.extattr_get_link 228 | n = _check(func(path, EXTATTR_NAMESPACE_USER, name, None, 0)) 229 | if n == 0: 230 | return 231 | valuebuf = create_string_buffer(n) 232 | n2 = _check(func(path, EXTATTR_NAMESPACE_USER, name, valuebuf, n), path) 233 | if n2 != n: 234 | raise Exception('getxattr failed') 235 | return valuebuf.raw 236 | 237 | def setxattr(path, name, value, *, follow_symlinks=True): 238 | name = os.fsencode(name) 239 | value = value and os.fsencode(value) 240 | if isinstance(path, str): 241 | path = os.fsencode(path) 242 | if isinstance(path, int): 243 | func = libc.extattr_set_fd 244 | elif follow_symlinks: 245 | func = libc.extattr_set_file 246 | else: 247 | func = libc.extattr_set_link 248 | _check(func(path, EXTATTR_NAMESPACE_USER, name, value, len(value) if value else 0), path) 249 | 250 | else: 251 | def listxattr(path, *, follow_symlinks=True): 252 | return [] 253 | 254 | def getxattr(path, name, *, follow_symlinks=True): 255 | pass 256 | 257 | def setxattr(path, name, value, *, follow_symlinks=True): 258 | pass 259 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " singlehtml to make a single large HTML file" 22 | @echo " pickle to make pickle files" 23 | @echo " json to make JSON files" 24 | @echo " htmlhelp to make HTML files and a HTML help project" 25 | @echo " qthelp to make HTML files and a qthelp project" 26 | @echo " devhelp to make HTML files and a Devhelp project" 27 | @echo " epub to make an epub" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 30 | @echo " text to make text files" 31 | @echo " man to make manual pages" 32 | @echo " changes to make an overview of all changed/added/deprecated items" 33 | @echo " linkcheck to check all external links for integrity" 34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 35 | 36 | clean: 37 | -rm -rf $(BUILDDIR)/* 38 | 39 | html: 40 | ./update_usage.sh 41 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 42 | @echo 43 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 44 | 45 | dirhtml: 46 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 47 | @echo 48 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 49 | 50 | singlehtml: 51 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 52 | @echo 53 | @echo "Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml." 54 | 55 | pickle: 56 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 57 | @echo 58 | @echo "Build finished; now you can process the pickle files." 59 | 60 | json: 61 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 62 | @echo 63 | @echo "Build finished; now you can process the JSON files." 64 | 65 | htmlhelp: 66 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 67 | @echo 68 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 69 | ".hhp project file in $(BUILDDIR)/htmlhelp." 70 | 71 | qthelp: 72 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 73 | @echo 74 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 75 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 76 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/attic.qhcp" 77 | @echo "To view the help file:" 78 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/attic.qhc" 79 | 80 | devhelp: 81 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 82 | @echo 83 | @echo "Build finished." 84 | @echo "To view the help file:" 85 | @echo "# mkdir -p $$HOME/.local/share/devhelp/attic" 86 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/attic" 87 | @echo "# devhelp" 88 | 89 | epub: 90 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 91 | @echo 92 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 93 | 94 | latex: 95 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 96 | @echo 97 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 98 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 99 | "(use \`make latexpdf' here to do that automatically)." 100 | 101 | latexpdf: 102 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 103 | @echo "Running LaTeX files through pdflatex..." 
104 | make -C $(BUILDDIR)/latex all-pdf 105 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 106 | 107 | text: 108 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 109 | @echo 110 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 111 | 112 | man: 113 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 114 | @echo 115 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 116 | 117 | changes: 118 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 119 | @echo 120 | @echo "The overview file is in $(BUILDDIR)/changes." 121 | 122 | linkcheck: 123 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 124 | @echo 125 | @echo "Link check complete; look for any errors in the above output " \ 126 | "or in $(BUILDDIR)/linkcheck/output.txt." 127 | 128 | doctest: 129 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 130 | @echo "Testing of doctests in the sources finished, look at the " \ 131 | "results in $(BUILDDIR)/doctest/output.txt." 132 | 133 | gh-pages: html 134 | GH_PAGES_CLONE="`mktemp -d`" && \ 135 | git clone --branch gh-pages `git rev-parse --show-toplevel` $$GH_PAGES_CLONE && \ 136 | (cd $$GH_PAGES_CLONE && git rm -r *) && \ 137 | cp -r _build/html/* $$GH_PAGES_CLONE && \ 138 | (cd $$GH_PAGES_CLONE && git add -A && git commit -m 'Updated gh-pages' && git push) && \ 139 | rm -rf $$GH_PAGES_CLONE 140 | 141 | inotify: html 142 | while inotifywait -r . 
--exclude usage.rst --exclude '_build/*' ; do make html ; done 143 | 144 | upload: html 145 | rsync -va -e ssh _build/html/ sushi.edgewall.com:/srv/attic/www/ 146 | 147 | -------------------------------------------------------------------------------- /docs/_themes/attic/sidebarlogo.html: -------------------------------------------------------------------------------- 1 | 3 | -------------------------------------------------------------------------------- /docs/_themes/attic/sidebarusefullinks.html: -------------------------------------------------------------------------------- 1 | Fork me on GitHub 3 | 4 |

Useful Links

5 | -------------------------------------------------------------------------------- /docs/_themes/attic/static/attic.css_t: -------------------------------------------------------------------------------- 1 | @import url("basic.css"); 2 | 3 | body { 4 | font-family: Helvetica; 5 | background-color: white; 6 | margin: 0; 7 | padding: 0; 8 | position: relative; 9 | } 10 | div.related { 11 | display: none; 12 | background-color: black; 13 | padding: .4em; 14 | width: 800px; 15 | margin: 0 auto; 16 | } 17 | div.related a { 18 | color: white; 19 | text-decoration: none; 20 | } 21 | div.document { 22 | width: 1030px; 23 | margin: 0 auto; 24 | } 25 | 26 | div.documentwrapper { 27 | float: right; 28 | width: 760px; 29 | padding: 0 20px 20px 20px; 30 | background-color: #f3f3f3; 31 | margin-bottom: 2em; 32 | } 33 | div.sphinxsidebar { 34 | margin-left: 0; 35 | parring-right: 20px; 36 | width: 230px; 37 | background: #e9e9e9; 38 | position: absolute; 39 | top: 0; 40 | min-height: 100%; 41 | } 42 | 43 | h1, h2, h3 { 44 | font-family: "Oswald"; 45 | font-weight: normal; 46 | color: #333; 47 | } 48 | h1 { 49 | margin: .8em 0 .5em; 50 | } 51 | h2, h3 { 52 | margin: 1.2em 0 .6em; 53 | } 54 | h1 { font-size: 200%;} 55 | h2 { font-size: 140%;} 56 | h3 { font-size: 110%;} 57 | ul { 58 | padding-left: 1.2em; 59 | margin-bottom: .3em; 60 | } 61 | ul ul { 62 | font-size: 95%; 63 | } 64 | li { 65 | margin: .1em 0; 66 | } 67 | a:link, a:visited { 68 | color: #00608f; 69 | text-decoration: none; 70 | } 71 | a:hover { 72 | color: #00B0E4; 73 | border-bottom: 1px dotted #00B0E4; 74 | } 75 | 76 | div.sphinxsidebar a:link, div.sphinxsidebar a:visited { 77 | color: #555; 78 | border-bottom: 1px dotted #555; 79 | } 80 | 81 | div.sphinxsidebar input { 82 | border: 1px solid #ccc; 83 | } 84 | 85 | pre { 86 | padding: 10px 20px; 87 | background: white; 88 | color: #222; 89 | line-height: 1.5em; 90 | border-bottom: 2px solid black; 91 | font-family: "Inconsolata"; 92 | } 93 | pre a:link, 94 | pre 
a:visited { 95 | color: #00B0E4; 96 | } 97 | 98 | div.sidebarlogo .title { 99 | font-family: "Oswald"; 100 | font-size: 500%; 101 | } 102 | div.sidebarlogo .subtitle { 103 | font-style: italic; 104 | color: #777; 105 | } 106 | tt span.pre { 107 | font-size: 110%; 108 | } 109 | dt { 110 | font-family: "Oswald"; 111 | font-size: 95%; 112 | } 113 | 114 | div.admonition p.admonition-title + p { 115 | display: inline; 116 | } 117 | 118 | div.admonition p { 119 | margin-bottom: 5px; 120 | } 121 | 122 | p.admonition-title { 123 | display: inline; 124 | } 125 | 126 | p.admonition-title:after { 127 | content: ":"; 128 | } 129 | 130 | div.note { 131 | background-color: #ff5; 132 | border-bottom: 2px solid #d22; 133 | } 134 | 135 | div.seealso { 136 | background-color: #ffe; 137 | border: 1px solid #ff6; 138 | border-radius: .4em; 139 | box-shadow: 2px 2px #dd6; 140 | } 141 | -------------------------------------------------------------------------------- /docs/_themes/attic/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = attic.css 4 | pygments_style = tango 5 | 6 | [options] 7 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Attic documentation build configuration file, created by 4 | # sphinx-quickstart on Sat Sep 10 18:18:25 2011. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os, attic 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. 
If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | #sys.path.insert(0, os.path.abspath('.')) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | #needs_sphinx = '1.0' 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 | extensions = [] 29 | 30 | # Add any paths that contain templates here, relative to this directory. 31 | templates_path = ['_templates'] 32 | 33 | # The suffix of source filenames. 34 | source_suffix = '.rst' 35 | 36 | # The encoding of source files. 37 | #source_encoding = 'utf-8-sig' 38 | 39 | # The master toctree document. 40 | master_doc = 'index' 41 | 42 | # General information about the project. 43 | project = 'Attic - Deduplicating Archiver' 44 | copyright = '2010-2014, Jonas Borgström' 45 | 46 | # The version info for the project you're documenting, acts as replacement for 47 | # |version| and |release|, also used in various other places throughout the 48 | # built documents. 49 | # 50 | # The short X.Y version. 51 | version = attic.__version__.split('-')[0] 52 | # The full version, including alpha/beta/rc tags. 53 | release = version 54 | 55 | # The language for content autogenerated by Sphinx. Refer to documentation 56 | # for a list of supported languages. 57 | #language = None 58 | 59 | # There are two options for replacing |today|: either, you set today to some 60 | # non-false value, then it is used: 61 | #today = '' 62 | # Else, today_fmt is used as the format for a strftime call. 63 | #today_fmt = '%B %d, %Y' 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 
67 | exclude_patterns = ['_build'] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. See the documentation for 93 | # a list of builtin themes. 94 | html_theme = 'attic' 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | #html_theme_options = {} 100 | 101 | # Add any paths that contain custom themes here, relative to this directory. 102 | html_theme_path = ['_themes'] 103 | 104 | # The name for this set of Sphinx documents. If None, it defaults to 105 | # " v documentation". 106 | #html_title = None 107 | 108 | # A shorter title for the navigation bar. Default is the same as html_title. 109 | #html_short_title = None 110 | 111 | # The name of an image file (relative to this directory) to place at the top 112 | # of the sidebar. 113 | #html_logo = None 114 | 115 | # The name of an image file (within the static path) to use as favicon of the 116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 117 | # pixels large. 
118 | #html_favicon = None 119 | 120 | # Add any paths that contain custom static files (such as style sheets) here, 121 | # relative to this directory. They are copied after the builtin static files, 122 | # so a file named "default.css" will overwrite the builtin "default.css". 123 | html_static_path = ['_static'] 124 | 125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 126 | # using the given strftime format. 127 | #html_last_updated_fmt = '%b %d, %Y' 128 | 129 | # If true, SmartyPants will be used to convert quotes and dashes to 130 | # typographically correct entities. 131 | #html_use_smartypants = True 132 | 133 | # Custom sidebar templates, maps document names to template names. 134 | html_sidebars = { 135 | 'index': ['sidebarlogo.html', 'sidebarusefullinks.html', 'searchbox.html'], 136 | '**': ['sidebarlogo.html', 'localtoc.html', 'relations.html', 'sidebarusefullinks.html', 'searchbox.html'] 137 | } 138 | # Additional templates that should be rendered to pages, maps page names to 139 | # template names. 140 | #html_additional_pages = {} 141 | 142 | # If false, no module index is generated. 143 | #html_domain_indices = True 144 | 145 | # If false, no index is generated. 146 | html_use_index = False 147 | 148 | # If true, the index is split into individual pages for each letter. 149 | #html_split_index = False 150 | 151 | # If true, links to the reST sources are added to the pages. 152 | html_show_sourcelink = False 153 | 154 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 155 | html_show_sphinx = False 156 | 157 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 158 | html_show_copyright = False 159 | 160 | # If true, an OpenSearch description file will be output, and all pages will 161 | # contain a tag referring to it. The value of this option must be the 162 | # base URL from which the finished HTML is served. 
163 | #html_use_opensearch = '' 164 | 165 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 166 | #html_file_suffix = None 167 | 168 | # Output file base name for HTML help builder. 169 | htmlhelp_basename = 'atticdoc' 170 | 171 | 172 | # -- Options for LaTeX output -------------------------------------------------- 173 | 174 | # The paper size ('letter' or 'a4'). 175 | #latex_paper_size = 'letter' 176 | 177 | # The font size ('10pt', '11pt' or '12pt'). 178 | #latex_font_size = '10pt' 179 | 180 | # Grouping the document tree into LaTeX files. List of tuples 181 | # (source start file, target name, title, author, documentclass [howto/manual]). 182 | latex_documents = [ 183 | ('index', 'Attic.tex', 'Attic Documentation', 184 | 'Jonas Borgström', 'manual'), 185 | ] 186 | 187 | # The name of an image file (relative to this directory) to place at the top of 188 | # the title page. 189 | #latex_logo = None 190 | 191 | # For "manual" documents, if this is true, then toplevel headings are parts, 192 | # not chapters. 193 | #latex_use_parts = False 194 | 195 | # If true, show page references after internal links. 196 | #latex_show_pagerefs = False 197 | 198 | # If true, show URL addresses after external links. 199 | #latex_show_urls = False 200 | 201 | # Additional stuff for the LaTeX preamble. 202 | #latex_preamble = '' 203 | 204 | # Documents to append as an appendix to all manuals. 205 | #latex_appendices = [] 206 | 207 | # If false, no module index is generated. 208 | #latex_domain_indices = True 209 | 210 | 211 | # -- Options for manual page output -------------------------------------------- 212 | 213 | # One entry per manual page. List of tuples 214 | # (source start file, name, description, authors, manual section). 
215 | #man_pages = [ 216 | # ('man', 'attic', 'Attic', 217 | # ['Jonas Borgström'], 1) 218 | #] 219 | 220 | extensions = ['sphinx.ext.extlinks'] 221 | 222 | extlinks = { 223 | 'issue': ('https://github.com/jborg/attic/issues/%s', '#'), 224 | 'targz_url': ('https://pypi.python.org/packages/source/A/Attic/%%s-%s.tar.gz' % version, None), 225 | 'artifacts': ('https://attic-backup.org/downloads/releases/%s/%%s' % version, '') 226 | } 227 | -------------------------------------------------------------------------------- /docs/faq.rst: -------------------------------------------------------------------------------- 1 | .. _faq: 2 | .. include:: global.rst.inc 3 | 4 | Frequently asked questions 5 | ========================== 6 | 7 | Which platforms are supported? 8 | Currently Linux, FreeBSD and MacOS X are supported. 9 | 10 | 11 | Can I backup VM disk images? 12 | Yes, the :ref:`deduplication ` technique used by |project_name| 13 | makes sure only the modified parts of the file are stored. 14 | 15 | Which file attributes are preserved? 16 | The following attributes are preserved: 17 | 18 | * Name 19 | * Contents 20 | * Time of last modification (nanosecond precision with Python >= 3.3) 21 | * User ID of owner 22 | * Group ID of owner 23 | * Unix Permission 24 | * Extended attributes (xattrs) 25 | * Access Control Lists (ACL_) on Linux, OS X and FreeBSD 26 | * BSD flags on OS X and FreeBSD 27 | 28 | How can I specify the encryption passphrase programmatically? 29 | The encryption passphrase can be specified programmatically using the 30 | `ATTIC_PASSPHRASE` environment variable. This is convenient when setting up 31 | automated encrypted backups. Another option is to use 32 | key file based encryption with a blank passphrase. See 33 | :ref:`encrypted_repos` for more details. 34 | 35 | When backing up to remote servers, is data encrypted before leaving the local machine, or do I have to trust that the remote server isn't malicious? 
36 | Yes, everything is encrypted before leaving the local machine. 37 | 38 | If a backup stops mid-way, does the already-backed-up data stay there? I.e. does Attic resume backups? 39 | Yes, during a backup a special checkpoint archive named ``.checkpoint`` is saved every 5 minutes 40 | containing all the data backed-up until that point. This means that at most 5 minutes worth of data needs to be 41 | retransmitted if a backup needs to be restarted. 42 | -------------------------------------------------------------------------------- /docs/foreword.rst: -------------------------------------------------------------------------------- 1 | .. include:: global.rst.inc 2 | .. _foreword: 3 | 4 | Foreword 5 | ======== 6 | 7 | |project_name| is a secure backup program for Linux, FreeBSD and Mac OS X. 8 | |project_name| is designed for efficient data storage where only new or 9 | modified data is stored. 10 | 11 | Features 12 | -------- 13 | 14 | Space efficient storage 15 | Variable block size `deduplication`_ is used to reduce the number of bytes 16 | stored by detecting redundant data. Each file is split into a number of 17 | variable length chunks and only chunks that have never been seen before 18 | are compressed and added to the repository. 19 | 20 | Optional data encryption 21 | All data can be protected using 256-bit AES_ encryption and data integrity 22 | and authenticity is verified using `HMAC-SHA256`_. 23 | 24 | Off-site backups 25 | |project_name| can store data on any remote host accessible over SSH as 26 | long as |project_name| is installed. 27 | 28 | Backups mountable as filesystems 29 | Backup archives are :ref:`mountable ` as 30 | `userspace filesystems`_ for easy backup verification and restores. 31 | 32 | 33 | Glossary 34 | -------- 35 | 36 | .. _deduplication_def: 37 | 38 | Deduplication 39 | Deduplication is a technique for improving storage utilization by 40 | eliminating redundant data. 41 | 42 | .. 
_archive_def: 43 | 44 | Archive 45 | An archive is a collection of files along with metadata that include file 46 | permissions, directory structure and various file attributes. 47 | Since each archive in a repository must have a unique name a good naming 48 | convention is ``hostname-YYYY-MM-DD``. 49 | 50 | .. _repository_def: 51 | 52 | Repository 53 | A repository is a filesystem directory storing data from zero or more 54 | archives. The data in a repository is both deduplicated and 55 | optionally encrypted making it both efficient and safe. Repositories are 56 | created using :ref:`attic_init` and the contents can be listed using 57 | :ref:`attic_list`. 58 | 59 | Key file 60 | When a repository is initialized a key file containing a password 61 | protected encryption key is created. It is vital to keep this file safe 62 | since the repository data is totally inaccessible without it. 63 | -------------------------------------------------------------------------------- /docs/global.rst.inc: -------------------------------------------------------------------------------- 1 | .. highlight:: bash 2 | .. |project_name| replace:: ``Attic`` 3 | .. |package_dirname| replace:: Attic-|version| 4 | .. |package_filename| replace:: |package_dirname|.tar.gz 5 | .. |package_url| replace:: https://pypi.python.org/packages/source/A/Attic/|package_filename| 6 | .. |git_url| replace:: https://github.com/jborg/attic.git 7 | .. _deduplication: https://en.wikipedia.org/wiki/Data_deduplication 8 | .. _AES: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard 9 | .. _HMAC-SHA256: http://en.wikipedia.org/wiki/HMAC 10 | .. _PBKDF2: https://en.wikipedia.org/wiki/PBKDF2 11 | .. _ACL: https://en.wikipedia.org/wiki/Access_control_list 12 | .. _github: https://github.com/jborg/attic 13 | .. _OpenSSL: https://www.openssl.org/ 14 | .. _Python: http://www.python.org/ 15 | .. _`msgpack-python`: https://pypi.python.org/pypi/msgpack-python/ 16 | .. 
_llfuse: https://pypi.python.org/pypi/llfuse/ 17 | .. _homebrew: http://mxcl.github.io/homebrew/ 18 | .. _issue tracker: https://github.com/jborg/attic/issues 19 | .. _userspace filesystems: https://en.wikipedia.org/wiki/Filesystem_in_Userspace 20 | .. _librelist: http://librelist.com/ 21 | .. _Debian: http://packages.debian.org/attic 22 | .. _Ubuntu: http://packages.ubuntu.com/attic 23 | .. _Arch Linux: https://aur.archlinux.org/packages/attic/ 24 | .. _Slackware: http://slackbuilds.org/result/?search=Attic 25 | .. _Cython: http://cython.org/ 26 | .. _virtualenv: https://pypi.python.org/pypi/virtualenv/ 27 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: global.rst.inc 2 | 3 | Welcome to Attic 4 | ================ 5 | |project_name| is a deduplicating backup program written in Python. 6 | The main goal of |project_name| is to provide an efficient and secure way 7 | to backup data. The data deduplication technique used makes |project_name| 8 | suitable for daily backups since only the changes are stored. 
9 | 10 | 11 | Easy to use 12 | ----------- 13 | Initialize a new backup :ref:`repository ` and create your 14 | first backup :ref:`archive ` in two lines:: 15 | 16 | $ attic init /somewhere/my-repository.attic 17 | $ attic create /somewhere/my-repository.attic::Monday ~/Documents 18 | $ attic create --stats /somewhere/my-repository.attic::Tuesday ~/Documents 19 | Archive name: Tuesday 20 | Archive fingerprint: 387a5e3f9b0e792e91ce87134b0f4bfe17677d9248cb5337f3fbf3a8e157942a 21 | Start time: Tue Mar 25 12:00:10 2014 22 | End time: Tue Mar 25 12:00:10 2014 23 | Duration: 0.08 seconds 24 | Number of files: 358 25 | Original size Compressed size Deduplicated size 26 | This archive: 57.16 MB 46.78 MB 151.67 kB 27 | All archives: 114.02 MB 93.46 MB 44.81 MB 28 | 29 | See the :ref:`quickstart` chapter for a more detailed example. 30 | 31 | Easy installation 32 | ----------------- 33 | You can use pip to install |project_name| quickly and easily:: 34 | 35 | $ pip3 install attic 36 | 37 | |project_name| is also part of the Debian_, Ubuntu_, `Arch Linux`_ and Slackware_ 38 | distributions of GNU/Linux. 39 | 40 | Need more help with installing? See :ref:`installation`. 41 | 42 | User's Guide 43 | ============ 44 | 45 | .. toctree:: 46 | :maxdepth: 2 47 | 48 | foreword 49 | installation 50 | quickstart 51 | usage 52 | faq 53 | 54 | Getting help 55 | ============ 56 | 57 | If you've found a bug or have a concrete feature request, you can add your bug 58 | report or feature request directly to the project's `issue tracker`_. For more 59 | general questions or discussions, a post to the mailing list is preferred. 60 | 61 | Mailing list 62 | ------------ 63 | 64 | There is a mailing list for Attic on librelist_ that you can use for feature 65 | requests and general discussions about Attic. A mailing list archive is 66 | available `here `_. 67 | 68 | To subscribe to the list, send an email to attic@librelist.com and reply 69 | to the confirmation mail. 
Likewise, to unsubscribe, send an email to 70 | attic-unsubscribe@librelist.com and reply to the confirmation mail. 71 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. include:: global.rst.inc 2 | .. _installation: 3 | 4 | Installation 5 | ============ 6 | 7 | |project_name| requires Python_ 3.2 or above to work. Even though Python 3 is 8 | not the default Python version on most Linux distributions, it is usually 9 | available as an optional install. 10 | 11 | Other dependencies: 12 | 13 | * `msgpack-python`_ >= 0.1.10 14 | * OpenSSL_ >= 1.0.0 15 | 16 | The OpenSSL version bundled with Mac OS X and FreeBSD is most likey too old. 17 | Newer versions are available from homebrew_ on OS X and from FreeBSD ports. 18 | 19 | The llfuse_ python package is also required if you wish to mount an 20 | archive as a FUSE filesystem. 21 | 22 | Virtualenv_ can be used to build and install |project_name| 23 | without affecting the system Python or requiring root access. 24 | 25 | Installing from PyPI using pip 26 | ------------------------------ 27 | :: 28 | 29 | $ pip3 install Attic 30 | 31 | Installing from source tarballs 32 | ------------------------------- 33 | .. parsed-literal:: 34 | 35 | $ curl -O :targz_url:`Attic` 36 | $ tar -xvzf |package_filename| 37 | $ cd |package_dirname| 38 | $ python setup.py install 39 | 40 | Installing from git 41 | ------------------- 42 | .. parsed-literal:: 43 | 44 | $ git clone |git_url| 45 | $ cd attic 46 | $ python setup.py install 47 | 48 | Please note that when installing from git, Cython_ is required to generate some files that 49 | are normally bundled with the release tarball. 50 | 51 | Packages 52 | -------- 53 | 54 | |project_name| is also part of the Debian_, Ubuntu_, `Arch Linux`_ and Slackware_ 55 | distributions of GNU/Linux. 
56 | 57 | Standalone binaries 58 | ------------------- 59 | 60 | Prebuilt standalone binaries that work on 61 | most Linux systems can be found :artifacts:`here <>`. 62 | -------------------------------------------------------------------------------- /docs/quickstart.rst: -------------------------------------------------------------------------------- 1 | .. include:: global.rst.inc 2 | .. _quickstart: 3 | 4 | Quick Start 5 | =========== 6 | 7 | This chapter will get you started with |project_name|. The first section 8 | presents a simple step by step example that uses |project_name| to backup data. 9 | The next section continues by showing how backups can be automated. 10 | 11 | A step by step example 12 | ---------------------- 13 | 14 | 1. Before a backup can be made a repository has to be initialized:: 15 | 16 | $ attic init /somewhere/my-repository.attic 17 | 18 | 2. Backup the ``~/src`` and ``~/Documents`` directories into an archive called 19 | *Monday*:: 20 | 21 | $ attic create /somewhere/my-repository.attic::Monday ~/src ~/Documents 22 | 23 | 3. The next day create a new archive called *Tuesday*:: 24 | 25 | $ attic create --stats /somewhere/my-repository.attic::Tuesday ~/src ~/Documents 26 | 27 | This backup will be a lot quicker and a lot smaller since only new never 28 | before seen data is stored. The ``--stats`` option causes |project_name| to 29 | output statistics about the newly created archive such as the amount of unique 30 | data (not shared with other archives):: 31 | 32 | Archive name: Tuesday 33 | Archive fingerprint: 387a5e3f9b0e792e91ce87134b0f4bfe17677d9248cb5337f3fbf3a8e157942a 34 | Start time: Tue Mar 25 12:00:10 2014 35 | End time: Tue Mar 25 12:00:10 2014 36 | Duration: 0.08 seconds 37 | Number of files: 358 38 | Original size Compressed size Deduplicated size 39 | This archive: 57.16 MB 46.78 MB 151.67 kB 40 | All archives: 114.02 MB 93.46 MB 44.81 MB 41 | 42 | 43 | 4. 
List all archives in the repository:: 44 | 45 | $ attic list /somewhere/my-repository.attic 46 | Monday Mon Mar 24 11:59:35 2014 47 | Tuesday Tue Mar 25 12:00:10 2014 48 | 49 | 5. List the contents of the *Monday* archive:: 50 | 51 | $ attic list /somewhere/my-repository.attic::Monday 52 | drwxr-xr-x user group 0 Jan 06 15:22 home/user/Documents 53 | -rw-r--r-- user group 7961 Nov 17 2012 home/user/Documents/Important.doc 54 | ... 55 | 56 | 6. Restore the *Monday* archive:: 57 | 58 | $ attic extract /somwhere/my-repository.attic::Monday 59 | 60 | 7. Recover disk space by manually deleting the *Monday* archive:: 61 | 62 | $ attic delete /somwhere/my-backup.attic::Monday 63 | 64 | .. Note:: 65 | Attic is quiet by default. Add the ``-v`` or ``--verbose`` option to 66 | get progress reporting during command execution. 67 | 68 | Automating backups 69 | ------------------ 70 | 71 | The following example script backs up ``/home`` and 72 | ``/var/www`` to a remote server. The script also uses the 73 | :ref:`attic_prune` subcommand to maintain a certain number 74 | of old archives:: 75 | 76 | #!/bin/sh 77 | REPOSITORY=username@remoteserver.com:repository.attic 78 | 79 | # Backup all of /home and /var/www except a few 80 | # excluded directories 81 | attic create --stats \ 82 | $REPOSITORY::hostname-`date +%Y-%m-%d` \ 83 | /home \ 84 | /var/www \ 85 | --exclude /home/*/.cache \ 86 | --exclude /home/Ben/Music/Justin\ Bieber \ 87 | --exclude '*.pyc' 88 | 89 | # Use the `prune` subcommand to maintain 7 daily, 4 weekly 90 | # and 6 monthly archives. 91 | attic prune -v $REPOSITORY --keep-daily=7 --keep-weekly=4 --keep-monthly=6 92 | 93 | .. 
_encrypted_repos: 94 | 95 | Repository encryption 96 | --------------------- 97 | 98 | Repository encryption is enabled at repository creation time:: 99 | 100 | $ attic init --encryption=passphrase|keyfile PATH 101 | 102 | When repository encryption is enabled all data is encrypted using 256-bit AES_ 103 | encryption and the integrity and authenticity is verified using `HMAC-SHA256`_. 104 | 105 | All data is encrypted before being written to the repository. This means that 106 | an attacker that manages to compromise the host containing an encrypted 107 | archive will not be able to access any of the data. 108 | 109 | |project_name| supports two different methods to derive the AES and HMAC keys. 110 | 111 | Passphrase based encryption 112 | This method uses a user supplied passphrase to derive the keys using the 113 | PBKDF2_ key derivation function. This method is convenient to use since 114 | there is no key file to keep track of and secure as long as a *strong* 115 | passphrase is used. 116 | 117 | .. Note:: 118 | For automated backups the passphrase can be specified using the 119 | `ATTIC_PASSPHRASE` environment variable. 120 | 121 | Key file based encryption 122 | This method generates random keys at repository initialization time that 123 | are stored in a password protected file in the ``~/.attic/keys/`` directory. 124 | The key file is a printable text file. This method is secure and suitable 125 | for automated backups. 126 | 127 | .. Note:: 128 | The repository data is totally inaccessible without the key file 129 | so it must be kept **safe**. 130 | 131 | 132 | .. _remote_repos: 133 | 134 | Remote repositories 135 | ------------------- 136 | 137 | |project_name| can initialize and access repositories on remote hosts if the 138 | host is accessible using SSH. 
This is fastest and easiest when |project_name| 139 | is installed on the remote host, in which case the following syntax is used:: 140 | 141 | $ attic init user@hostname:repository.attic 142 | 143 | or:: 144 | 145 | $ attic init ssh://user@hostname:port/repository.attic 146 | 147 | If it is not possible to install |project_name| on the remote host, 148 | it is still possible to use the remote host to store a repository by 149 | mounting the remote filesystem, for example, using sshfs:: 150 | 151 | $ sshfs user@hostname:/path/to/folder /tmp/mymountpoint 152 | $ attic init /tmp/mymountpoint/repository.attic 153 | $ fusermount -u /tmp/mymountpoint 154 | 155 | However, be aware that sshfs doesn't fully implement POSIX locks, so 156 | you must be sure to not have two processes trying to access the same 157 | repository at the same time. 158 | -------------------------------------------------------------------------------- /docs/update_usage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ ! -d usage ]; then 3 | mkdir usage 4 | fi 5 | for cmd in change-passphrase check create delete extract info init list mount prune; do 6 | FILENAME="usage/$cmd.rst.inc" 7 | LINE=`echo -n attic $cmd | tr 'a-z- ' '-'` 8 | echo -e ".. _attic_$cmd:\n" > $FILENAME 9 | echo -e "attic $cmd\n$LINE\n::\n\n" >> $FILENAME 10 | attic help $cmd --usage-only | sed -e 's/^/ /' >> $FILENAME 11 | echo -e "\nDescription\n~~~~~~~~~~~\n" >> $FILENAME 12 | attic help $cmd --epilog-only >> $FILENAME 13 | done 14 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | .. include:: global.rst.inc 2 | .. _detailed_usage: 3 | 4 | Usage 5 | ===== 6 | 7 | |project_name| consists of a number of commands. Each command accepts 8 | a number of arguments and options. The following sections will describe each 9 | command in detail. 
10 | 11 | Quiet by default 12 | ---------------- 13 | 14 | Like most UNIX commands |project_name| is quiet by default but the ``-v`` or 15 | ``--verbose`` option can be used to get the program to output more status 16 | messages as it is processing. 17 | 18 | .. include:: usage/init.rst.inc 19 | 20 | Examples 21 | ~~~~~~~~ 22 | :: 23 | 24 | # Local repository 25 | $ attic init /data/mybackuprepo.attic 26 | 27 | # Remote repository 28 | $ attic init user@hostname:mybackuprepo.attic 29 | 30 | # Encrypted remote repository 31 | $ attic init --encryption=passphrase user@hostname:mybackuprepo.attic 32 | 33 | 34 | .. include:: usage/create.rst.inc 35 | 36 | Examples 37 | ~~~~~~~~ 38 | :: 39 | 40 | # Backup ~/Documents into an archive named "my-documents" 41 | $ attic create /data/myrepo.attic::my-documents ~/Documents 42 | 43 | # Backup ~/Documents and ~/src but exclude pyc files 44 | $ attic create /data/myrepo.attic::my-files \ 45 | ~/Documents \ 46 | ~/src \ 47 | --exclude '*.pyc' 48 | 49 | # Backup the root filesystem into an archive named "root-YYYY-MM-DD" 50 | NAME="root-`date +%Y-%m-%d`" 51 | $ attic create /data/myrepo.attic::$NAME / --do-not-cross-mountpoints 52 | 53 | 54 | .. include:: usage/extract.rst.inc 55 | 56 | Examples 57 | ~~~~~~~~ 58 | :: 59 | 60 | # Extract entire archive 61 | $ attic extract /data/myrepo::my-files 62 | 63 | # Extract entire archive and list files while processing 64 | $ attic extract -v /data/myrepo::my-files 65 | 66 | # Extract the "src" directory 67 | $ attic extract /data/myrepo::my-files home/USERNAME/src 68 | 69 | # Extract the "src" directory but exclude object files 70 | $ attic extract /data/myrepo::my-files home/USERNAME/src --exclude '*.o' 71 | 72 | .. include:: usage/check.rst.inc 73 | 74 | .. include:: usage/delete.rst.inc 75 | 76 | .. 
include:: usage/list.rst.inc 77 | 78 | Examples 79 | ~~~~~~~~ 80 | :: 81 | 82 | $ attic list /data/myrepo 83 | my-files Thu Aug 1 23:33:22 2013 84 | my-documents Thu Aug 1 23:35:43 2013 85 | root-2013-08-01 Thu Aug 1 23:43:55 2013 86 | root-2013-08-02 Fri Aug 2 15:18:17 2013 87 | ... 88 | 89 | $ attic list /data/myrepo::root-2013-08-02 90 | drwxr-xr-x root root 0 Jun 05 12:06 . 91 | lrwxrwxrwx root root 0 May 31 20:40 bin -> usr/bin 92 | drwxr-xr-x root root 0 Aug 01 22:08 etc 93 | drwxr-xr-x root root 0 Jul 15 22:07 etc/ImageMagick-6 94 | -rw-r--r-- root root 1383 May 22 22:25 etc/ImageMagick-6/colors.xml 95 | ... 96 | 97 | 98 | .. include:: usage/prune.rst.inc 99 | 100 | Examples 101 | ~~~~~~~~ 102 | :: 103 | 104 | # Keep 7 end of day and 4 additional end of week archives: 105 | $ attic prune /data/myrepo --keep-daily=7 --keep-weekly=4 106 | 107 | # Same as above but only apply to archive names starting with "foo": 108 | $ attic prune /data/myrepo --keep-daily=7 --keep-weekly=4 --prefix=foo 109 | 110 | # Keep 7 end of day, 4 additional end of week archives, 111 | # and an end of month archive for every month: 112 | $ attic prune /data/myrepo --keep-daily=7 --keep-weekly=4 --keep-monthly=-1 113 | 114 | # Keep all backups in the last 10 days, 4 additional end of week archives, 115 | # and an end of month archive for every month: 116 | $ attic prune /data/myrepo --keep-within=10d --keep-weekly=4 --keep-monthly=-1 117 | 118 | 119 | .. 
include:: usage/info.rst.inc 120 | 121 | Examples 122 | ~~~~~~~~ 123 | :: 124 | 125 | $ attic info /data/myrepo::root-2013-08-02 126 | Name: root-2013-08-02 127 | Fingerprint: bc3902e2c79b6d25f5d769b335c5c49331e6537f324d8d3badcb9a0917536dbb 128 | Hostname: myhostname 129 | Username: root 130 | Time: Fri Aug 2 15:18:17 2013 131 | Command line: /usr/bin/attic create --stats /data/myrepo::root-2013-08-02 / --do-not-cross-mountpoints 132 | Number of files: 147429 133 | Original size: 5344169493 (4.98 GB) 134 | Compressed size: 1748189642 (1.63 GB) 135 | Unique data: 64805454 (61.80 MB) 136 | 137 | 138 | .. include:: usage/mount.rst.inc 139 | 140 | Examples 141 | ~~~~~~~~ 142 | :: 143 | 144 | $ attic mount /data/myrepo::root-2013-08-02 /tmp/mymountpoint 145 | $ ls /tmp/mymountpoint 146 | bin boot etc lib lib64 mnt opt root sbin srv usr var 147 | $ fusermount -u /tmp/mymountpoint 148 | 149 | 150 | .. include:: usage/change-passphrase.rst.inc 151 | 152 | Examples 153 | ~~~~~~~~ 154 | :: 155 | 156 | # Create a key file protected repository 157 | $ attic init --encryption=keyfile /tmp/encrypted-repo 158 | Initializing repository at "/tmp/encrypted-repo" 159 | Enter passphrase (empty for no passphrase): 160 | Enter same passphrase again: 161 | Key file "/home/USER/.attic/keys/tmp_encrypted_repo" created. 162 | Keep this file safe. Your data will be inaccessible without it. 
163 | 164 | # Change key file passphrase 165 | $ attic change-passphrase /tmp/encrypted-repo 166 | Enter passphrase for key file /home/USER/.attic/keys/tmp_encrypted_repo: 167 | New passphrase: 168 | Enter same passphrase again: 169 | Key file "/home/USER/.attic/keys/tmp_encrypted_repo" updated 170 | -------------------------------------------------------------------------------- /scripts/attic: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from attic.archiver import main 3 | main() 4 | 5 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 *-* 2 | import os 3 | import sys 4 | from glob import glob 5 | 6 | import versioneer 7 | versioneer.versionfile_source = 'attic/_version.py' 8 | versioneer.versionfile_build = 'attic/_version.py' 9 | versioneer.tag_prefix = '' 10 | versioneer.parentdir_prefix = 'Attic-' # dirname like 'myproject-1.2.0' 11 | 12 | min_python = (3, 2) 13 | if sys.version_info < min_python: 14 | print("Attic requires Python %d.%d or later" % min_python) 15 | sys.exit(1) 16 | 17 | try: 18 | from setuptools import setup, Extension 19 | except ImportError: 20 | from distutils.core import setup, Extension 21 | 22 | crypto_source = 'attic/crypto.pyx' 23 | chunker_source = 'attic/chunker.pyx' 24 | hashindex_source = 'attic/hashindex.pyx' 25 | platform_linux_source = 'attic/platform_linux.pyx' 26 | platform_darwin_source = 'attic/platform_darwin.pyx' 27 | platform_freebsd_source = 'attic/platform_freebsd.pyx' 28 | 29 | try: 30 | from Cython.Distutils import build_ext 31 | import Cython.Compiler.Main as cython_compiler 32 | 33 | class Sdist(versioneer.cmd_sdist): 34 | def __init__(self, *args, **kwargs): 35 | for src in glob('attic/*.pyx'): 36 | cython_compiler.compile(glob('attic/*.pyx'), 37 | cython_compiler.default_options) 38 | 
versioneer.cmd_sdist.__init__(self, *args, **kwargs) 39 | 40 | def make_distribution(self): 41 | self.filelist.extend(['attic/crypto.c', 'attic/chunker.c', 'attic/_chunker.c', 'attic/hashindex.c', 'attic/_hashindex.c', 'attic/platform_linux.c', 'attic/platform_freebsd.c', 'attic/platform_darwin.c']) 42 | super(Sdist, self).make_distribution() 43 | 44 | except ImportError: 45 | class Sdist(versioneer.cmd_sdist): 46 | def __init__(self, *args, **kwargs): 47 | raise Exception('Cython is required to run sdist') 48 | 49 | crypto_source = crypto_source.replace('.pyx', '.c') 50 | chunker_source = chunker_source.replace('.pyx', '.c') 51 | hashindex_source = hashindex_source.replace('.pyx', '.c') 52 | platform_linux_source = platform_linux_source.replace('.pyx', '.c') 53 | platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c') 54 | platform_darwin_source = platform_darwin_source.replace('.pyx', '.c') 55 | from distutils.command.build_ext import build_ext 56 | if not all(os.path.exists(path) for path in [crypto_source, chunker_source, hashindex_source, platform_linux_source, platform_freebsd_source]): 57 | raise ImportError('The GIT version of Attic needs Cython. Install Cython or use a released version') 58 | 59 | 60 | def detect_openssl(prefixes): 61 | for prefix in prefixes: 62 | filename = os.path.join(prefix, 'include', 'openssl', 'evp.h') 63 | if os.path.exists(filename): 64 | with open(filename, 'r') as fd: 65 | if 'PKCS5_PBKDF2_HMAC(' in fd.read(): 66 | return prefix 67 | 68 | 69 | possible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl', '/usr/local/attic', '/opt/local'] 70 | if os.environ.get('ATTIC_OPENSSL_PREFIX'): 71 | possible_openssl_prefixes.insert(0, os.environ.get('ATTIC_OPENSSL_PREFIX')) 72 | ssl_prefix = detect_openssl(possible_openssl_prefixes) 73 | if not ssl_prefix: 74 | raise Exception('Unable to find OpenSSL >= 1.0 headers. 
(Looked here: {})'.format(', '.join(possible_openssl_prefixes))) 75 | include_dirs = [os.path.join(ssl_prefix, 'include')] 76 | library_dirs = [os.path.join(ssl_prefix, 'lib')] 77 | 78 | 79 | with open('README.rst', 'r') as fd: 80 | long_description = fd.read() 81 | 82 | cmdclass = versioneer.get_cmdclass() 83 | cmdclass.update({'build_ext': build_ext, 'sdist': Sdist}) 84 | 85 | ext_modules = [ 86 | Extension('attic.crypto', [crypto_source], libraries=['crypto'], include_dirs=include_dirs, library_dirs=library_dirs), 87 | Extension('attic.chunker', [chunker_source]), 88 | Extension('attic.hashindex', [hashindex_source]) 89 | ] 90 | if sys.platform.startswith('linux'): 91 | ext_modules.append(Extension('attic.platform_linux', [platform_linux_source], libraries=['acl'])) 92 | elif sys.platform.startswith('freebsd'): 93 | ext_modules.append(Extension('attic.platform_freebsd', [platform_freebsd_source])) 94 | elif sys.platform == 'darwin': 95 | ext_modules.append(Extension('attic.platform_darwin', [platform_darwin_source])) 96 | 97 | setup( 98 | name='Attic', 99 | version=versioneer.get_version(), 100 | author='Jonas Borgstrom', 101 | author_email='jonas@borgstrom.se', 102 | url='https://attic-backup.org/', 103 | description='Deduplicated backups', 104 | long_description=long_description, 105 | license='BSD', 106 | platforms=['Linux', 'MacOS X'], 107 | classifiers=[ 108 | 'Development Status :: 4 - Beta', 109 | 'Environment :: Console', 110 | 'Intended Audience :: System Administrators', 111 | 'License :: OSI Approved :: BSD License', 112 | 'Operating System :: POSIX :: BSD :: FreeBSD', 113 | 'Operating System :: MacOS :: MacOS X', 114 | 'Operating System :: POSIX :: Linux', 115 | 'Programming Language :: Python', 116 | 'Topic :: Security :: Cryptography', 117 | 'Topic :: System :: Archiving :: Backup', 118 | ], 119 | packages=['attic', 'attic.testsuite'], 120 | scripts=['scripts/attic'], 121 | cmdclass=cmdclass, 122 | ext_modules=ext_modules, 123 | 
install_requires=['msgpack-python'] 124 | ) 125 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py32, py33, py34 3 | 4 | [testenv] 5 | # Change dir to avoid import problem 6 | changedir = docs 7 | commands = {envpython} -m attic.testsuite.run -bv [] 8 | 9 | [testenv:py32] 10 | deps = mock 11 | --------------------------------------------------------------------------------