├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── pyznap ├── __init__.py ├── clean.py ├── config │ └── pyznap.conf ├── main.py ├── process.py ├── pyzfs.py ├── send.py ├── ssh.py ├── take.py └── utils.py ├── setup.cfg ├── setup.py └── tests ├── test_functions.py ├── test_functions_ssh.py ├── test_pyznap.py ├── test_pyznap_ssh.py └── test_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | __pycache__/ 3 | .cache/ 4 | .pytest_cache/ 5 | *.pyc 6 | /dist/ 7 | /build/ 8 | /*.egg-info 9 | /*.egg 10 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to pyznap will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | 8 | ## [1.6.0] - 2020-09-22 9 | ### Added 10 | - Added resumable send/receive. 11 | Use the `resume` option in the config file or the `-r` flag for `pyznap send`. 12 | - Added options `dest_auto_create`, `retries` and `retry_interval`. 13 | 14 | 15 | ## [1.5.0] - 2020-03-15 16 | ### Added 17 | - Added zfs raw send for encrypted or compressed datasets. 18 | Use the `raw_send` option in the config file or the `-w` flag for `pyznap send`. 19 | 20 | 21 | ## [1.4.3] - 2019-09-07 22 | ### Fixed 23 | - pyznap would falsely assume executables such as 'pv' exist on SmartOS even when not present. 24 | 25 | 26 | ## [1.4.2] - 2019-08-30 27 | ### Fixed 28 | - Catch DatasetNotFoundError if dataset was destroyed after starting pyznap. 29 | 30 | 31 | ## [1.4.1] - 2019-08-27 32 | ### Fixed 33 | - Close stderr to detect broken pipe. 34 | - Raise CalledProcessError if there is any error during zfs receive. 
35 | 36 | 37 | ## [1.4.0] - 2019-08-27 38 | ### Added 39 | - You can now exclude datasets when sending using [Unix shell-type wildcards](https://docs.python.org/3/library/fnmatch.html). 40 | Use the `exclude` keyword in the config or the `-e` flag for `pyznap send`. 41 | 42 | 43 | ## [1.3.0] - 2019-08-22 44 | ### Added 45 | - pyznap can now pull data over ssh, i.e. you can now send from local to local, local to remote, 46 | remote to local and remote to remote. Note that remote to remote is not direct, but via the local 47 | machine. 48 | - `pv` now outputs status once per minute when stdout is redirected (e.g. to a file). 49 | 50 | ### Changed 51 | - Rewrote local/remote 'zfs send' commands in a more uniform manner. 52 | 53 | ### Fixed 54 | - Enforce python>=3.5 in setup.py. 55 | 56 | 57 | ## [1.2.1] - 2019-07-15 58 | ### Fixed 59 | - Removed `configparser` dependency. 60 | 61 | 62 | ## [1.2.0] - 2019-07-14 63 | ### Added 64 | - pyznap now uses compression for sending over ssh. Current supported methods are `none`, `lzop` 65 | (default), `lz4`, `gzip`, `pigz`, `bzip2` and `xz`. There is a new config option (e.g. `compress = none`) 66 | and a new flag `-c` for `pyznap send`. 67 | - `mbuffer` is now also used on the dest when sending over ssh. 68 | 69 | ### Changed 70 | - Rewrote how commands are executed over ssh: Implemented own SSH class, removed paramiko dependency. 71 | - General code cleanup. 72 | 73 | 74 | ## [1.1.3] - 2019-07-14 75 | ### Fixed 76 | - Send would fail on FreeBSD due to missing stream_size. 77 | 78 | 79 | ## [1.1.2] - 2018-11-27 80 | ### Added 81 | - Catch KeyboardInterrupt exceptions. 82 | 83 | ### Changed 84 | - Code cleanup. 85 | 86 | 87 | ## [1.1.1] - 2018-11-17 88 | ### Changed 89 | - Changed frequency of 'frequent' snapshots to 1 minute. Interval at which 'frequent' snapshots 90 | are taken can be controlled by cronjob. This allows users to take snapshots at different intervals 91 | (1min, 5min, 15min, ...).
92 | - Code cleanup in process.py. No more overwriting of subprocess functions. 93 | 94 | ### Fixed 95 | - Fixed pv width to 100 chars. 96 | 97 | 98 | ## [1.1.0] - 2018-10-15 99 | ### Added 100 | - pyznap now uses `pv` to show progress of zfs send operations. 101 | - Better error handling during zfs send over ssh. 102 | 103 | ### Fixed 104 | - Changed readme to only mention python 3.5+. 105 | 106 | 107 | ## [1.0.2] - 2018-08-15 108 | ### Added 109 | - More verbose error messages when CalledProcessError is raised. 110 | 111 | ### Fixed 112 | - Send over ssh would fail with OSError if dataset has no space left. 113 | 114 | 115 | ## [1.0.1] - 2018-08-13 116 | ### Added 117 | - pyznap now checks if the dest filesystem has a 'zfs receive' ongoing before trying to send. 118 | - Added more helpful error message when source/dest do not exist. 119 | - Added a changelog. 120 | 121 | ### Fixed 122 | - Fixed bug where ssh connection would be opened but not closed if dataset does not exist. 123 | 124 | 125 | ## [1.0.0] - 2018-08-10 126 | ### Added 127 | - Added tests to test pyznap running over time. 128 | 129 | ### Changed 130 | - Code cleanup. 131 | - Changed some docstrings. 132 | - Extended Readme. 133 | 134 | ### Fixed 135 | - Fixed multiline ZFS errors not being matched. 136 | 137 | 138 | ## [0.9.1] - 2018-08-08 139 | ### Fixed 140 | - Logging was writing to stderr instead of stdout. 141 | 142 | 143 | ## [0.9.0] - 2018-08-07 144 | ### Added 145 | - First release on PyPI. 
146 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include pyznap/config/*.conf -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all install-dev test release release-test clean 2 | 3 | all: test 4 | 5 | install-dev: 6 | pip install -e .[dev] 7 | 8 | test: clean install-dev 9 | pytest 10 | 11 | release: 12 | pip install twine 13 | python setup.py sdist bdist_wheel 14 | twine upload dist/* 15 | rm -f -r build/ dist/ pyznap.egg-info/ 16 | 17 | release-test: 18 | pip install twine 19 | python setup.py sdist bdist_wheel 20 | twine upload --repository-url https://test.pypi.org/legacy/ dist/* 21 | rm -f -r build/ dist/ pyznap.egg-info/ 22 | 23 | clean: 24 | rm -f -r build/ 25 | rm -f -r dist/ 26 | rm -f -r pyznap/__pycache__/ 27 | rm -f -r tests/__pycache__ 28 | rm -f -r pyznap.egg-info/ 29 | rm -f -r .pytest_cache/ 30 | rm -f -r .cache/ 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # README # 2 | 3 | pyznap is a ZFS snapshot management tool. It automatically takes and deletes snapshots and can send 4 | them to different backup locations. You can specify a policy for a given filesystem in the 5 | pyznap.conf file and then use cron to let it run regularly. pyznap includes zfs 6 | bindings for python, forked and modified from https://bitbucket.org/stevedrake/weir/. 7 | 8 | 9 | #### How does it work? #### 10 | 11 | pyznap regularly takes and deletes snapshots according to a specified policy. You can take frequent, 12 | hourly, daily, weekly, monthly and yearly snapshots. 
'frequent' snapshots can be taken up to once 13 | per minute, the frequency can be adjusted by the cronjob frequency. Old snapshots are deleted as 14 | you take new ones, thinning out the history as it gets older. 15 | 16 | Datasets can also be replicated to other pools on the same system or remotely over ssh. After an 17 | initial sync, backups will be done incrementally as long as there are common snapshots between the 18 | source and the destination. 19 | 20 | 21 | #### Requirements #### 22 | 23 | pyznap is written in python 3.5+ and has no dependencies. For developing and running the tests you 24 | need: 25 | 26 | pytest 27 | pytest-dependency 28 | paramiko 29 | 30 | You also need the `faketime` program for some tests to simulate pyznap running over time. 31 | 32 | I suggest installing [virtualenv & virtualenvwrapper](http://docs.python-guide.org/en/latest/dev/virtualenvs/), 33 | so you don't clutter your system python installation with additional packages. 34 | 35 | pyznap uses `mbuffer` and `lzop` (by default) to speed up zfs send/recv, and `pv` to show progress, 36 | but also works if they are not installed. Other supported compression methods are: `none`, `lz4`, 37 | `gzip`, `pigz`, `bzip2` and `xz`. 38 | 39 | Note that ZFS needs root access to run commands. Due to this you should install pyznap under your 40 | root user. 41 | 42 | 43 | #### How do I set it up? #### 44 | 45 | pyznap can easily be installed with pip. In your virtualenv just run 46 | 47 | pip install pyznap 48 | 49 | and pyznap & its requirements will be installed. This should also create an executable in your PATH, 50 | either at `/path/to/virtualenv/pyznap/bin/pyznap` or `/usr/local/bin/pyznap`. If you use your 51 | system python installation you might want to use the `--user` flag. In this case the executable will 52 | be located at `~/.local/bin/pyznap`. 53 | 54 | Before you can use pyznap, you will need to create a config file. 
For initial setup run 55 | 56 | pyznap setup [-p PATH] 57 | 58 | This will create a directory `PATH` (default is `/etc/pyznap/`) and copy a sample config there. A 59 | config for your system might look like this (remove the comments): 60 | 61 | [rpool/filesystem] 62 | frequent = 4 # Keep 4 frequent snapshots 63 | hourly = 24 # Keep 24 hourly snapshots 64 | daily = 7 # Keep 7 daily snapshots 65 | weekly = 4 # Keep 4 weekly snapshots 66 | monthly = 6 # Keep 6 monthly snapshots 67 | yearly = 1 # Keep 1 yearly snapshot 68 | snap = yes # Take snapshots on this filesystem 69 | clean = yes # Delete old snapshots on this filesystem 70 | dest = backup/filesystem # Backup this filesystem on this location 71 | exclude = rpool/filesystem/data/* # Exclude these datasets for pyznap send 72 | 73 | Then set up a cronjob by creating a file under `/etc/cron.d/` 74 | 75 | nano /etc/cron.d/pyznap 76 | 77 | and let pyznap run regularly by adding the following lines 78 | 79 | SHELL=/bin/sh 80 | PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin 81 | 82 | */15 * * * * root /path/to/pyznap snap >> /var/log/pyznap.log 2>&1 83 | 84 | This will run pyznap every quarter hour to take and delete snapshots. 'frequent' snapshots can be 85 | taken up to once per minute, so adjust your cronjob accordingly. 86 | 87 | If you also want to send your filesystems to another location you can add a line 88 | 89 | 0 0 * * * root /path/to/pyznap send >> /var/log/pyznap.log 2>&1 90 | 91 | This will backup your data once per day at 12am. 92 | 93 | You can also manage, send to and pull from remote ssh locations. Always specify ssh locations with 94 | 95 | ssh:port:user@host:rpool/data 96 | 97 | If you omit the port (`ssh::user@host:...`) the default `22` is assumed. 
A sample config which backs 98 | up a filesystem to a remote location looks like: 99 | 100 | [rpool/data] 101 | hourly = 24 102 | snap = yes 103 | clean = yes 104 | dest = ssh:22:user@host:backup/data # Specify ssh destination 105 | dest_keys = /home/user/.ssh/id_rsa # Provide key for ssh login. If none given, look in home dir 106 | compress = gzip # Use gzip compression for sending over ssh 107 | 108 | To pull a filesystem from a remote location use: 109 | 110 | [ssh::user@host:rpool/data] # Specify ssh source 111 | key = /home/user/.ssh/id_rsa # Provide key for ssh login. If none given, look in home dir 112 | dest = tank/data 113 | compress = lz4 114 | 115 | You can exclude datasets when sending using [Unix shell-style wildcards](https://docs.python.org/3/library/fnmatch.html): 116 | 117 | [rpool] 118 | dest = backup/rpool, tank/rpool 119 | exclude = rpool/data rpool/home/*/documents rpool/home/user1, rpool/home* 120 | 121 | Note that exclude rules are separated by a `,` for the different dests, and you can specify multiple 122 | rules for a single dest by separating them with a space. Exclude rules thus cannot contain any comma 123 | or whitespace. 124 | 125 | I would also suggest making sure that root has ownership for all files, s.t. no user can modify them. 126 | If that is not the case just run 127 | 128 | chown root:root -R /etc/pyznap/ 129 | 130 | 131 | #### Config options #### 132 | 133 | Here is a list of all options you can set in the config file: 134 | 135 | | Option | Input | Description | 136 | |--------------------|-----------------|-------------| 137 | | `key` | String | Path to ssh keyfile for source | 138 | | `frequent` | Integer | Number of frequent snapshots.
These can be created every minute (whenever pyznap is called) | 139 | | `hourly` | Integer | Number of hourly snapshots | 140 | | `daily` | Integer | Number of daily snapshots | 141 | | `weekly` | Integer | Number of weekly snapshots | 142 | | `monthly` | Integer | Number of monthly snapshots | 143 | | `yearly` | Integer | Number of yearly snapshots | 144 | | `snap` | yes/no | Should snapshots be taken | 145 | | `clean` | yes/no | Should snapshots be cleaned | 146 | | `dest` | List of string | Comma-separated list of destinations where to send source filesystem | 147 | | `dest_keys` | List of string | Path to ssh keyfile for dest. Comma-separated list for multiple dest | 148 | | `compress` | List of string | Compression to use over ssh, supported are gzip, lzop, bzip2, pigz, xz & lz4. Default is lzop. Comma-separated list for multiple dest | 149 | | `exclude` | List of string | Whitespace-separated list of datasets to exclude from sending. Exclude lists for different dests are separated by comma | 150 | | `raw_send` | List of yes/no | Use zfs raw send. Comma-separated list for multiple dest | 151 | | `resume` | List of yes/no | Use resumable send/receive. Comma-separated list for multiple dest | 152 | | `dest_auto_create` | List of yes/no | Automatically create missing root datasets. Comma-separated list for multiple dest | 153 | | `retries` | List of integer | Number of retries on connection issues. Comma-separated list for multiple dest | 154 | | `retry_interval` | List of integer | Time in seconds between retries. Comma-separated list for multiple dest | 155 | 156 | 157 | #### Command line options #### 158 | 159 | Run `pyznap -h` to see all available options. 160 | 161 | + --config 162 | 163 | Specify config file. Default is `/etc/pyznap/pyznap.conf`. 164 | 165 | + -v, --verbose 166 | 167 | Print more verbose output. 168 | 169 | + setup [-p PATH] 170 | 171 | Initial setup. Creates a config dir and puts a sample config file there.
You can specify the path 172 | to the config dir with the `-p` flag, default is `/etc/pyznap/`. 173 | 174 | + snap 175 | 176 | Interface to the snapshot management tool. Has three optional arguments: 177 | 178 | + --take 179 | 180 | Takes snapshots according to policy in the config file. 181 | 182 | + --clean 183 | 184 | Deletes old snapshots according to policy. 185 | 186 | + --full 187 | 188 | First takes snapshots, then deletes old ones. Default when no other option is given. 189 | 190 | + send 191 | 192 | Interface to the zfs send/receive tool. Has two usages: 193 | 194 | + No further option is given 195 | 196 | Send snapshots to backup locations according to policy. 197 | 198 | + -s SOURCE -d DESTINATION [-c COMPRESSION] [-i KEYFILE] [-j SOURCE_KEY] [-k DEST_KEY] [-e EXCLUDE] [-w] [-r] [--dest-auto-create] [--retries RETRIES] [--retry-interval RETRY_INTERVAL] 199 | 200 | Send source filesystem to destination filesystem. If either source OR dest is a remote location, 201 | you can specify the keyfile with the `-i` flag. If both source AND dest are remote, you specify 202 | the keyfiles with the flag `-j` for the source and `-k` for the dest. You can also turn on 203 | compression with the `-c` flag. Currently supported options are: `none`, `lzop`, `lz4`, `gzip`, 204 | `pigz`, `bzip2` and `xz`. If no option is given, `lzop` is used if available. You can specify 205 | multiple (whitespace separated) wildcard exclude rules with the `-e` flag. Note that you should 206 | probably pass these as strings or escape the wildcard (e.g. `-e '*/data'` or `-e \*/data`), else 207 | your shell might expand the pattern. ZFS raw send can be enabled with the `-w` flag, in which case 208 | compression will be disabled. Resumable zfs send/receive can be enabled with the `-r` flag. You 209 | can specify a number of retries on connection issues with the `--retries` option, and set the 210 | retry interval with `--retry-interval`. 
Normally pyznap will not create missing root datasets, 211 | but you can set the `--dest-auto-create` flag to automatically create it. 212 | 213 | 214 | #### Usage examples #### 215 | 216 | + Take snapshots according to policy in default config file: 217 | 218 | `pyznap snap --take` 219 | 220 | + Clean snapshots according to policy in another config file: 221 | 222 | `pyznap --config /etc/pyznap/data.conf snap --clean` 223 | 224 | + Take and clean snapshots according to policy in default config file: 225 | 226 | `pyznap snap` 227 | 228 | + Backup snapshots according to policy in default config file: 229 | 230 | `pyznap send` 231 | 232 | + Backup a single filesystem locally: 233 | 234 | `pyznap send -s tank/data -d backup/data` 235 | 236 | + Send a single filesystem to a remote location, using `pigz` compression: 237 | 238 | `pyznap send -s tank/data -d ssh:20022:root@example.com:backup/data -i /root/.ssh/id_rsa -c pigz` 239 | 240 | + Pull a single filesystem from a remote location: 241 | 242 | `pyznap send -s ssh::root@example.com:tank/data -d backup/data -c lz4` 243 | 244 | + Pull a single filesystem from a remote location and send it to another remote location: 245 | 246 | `pyznap send -s ssh::root@example1.com:tank/data -d ssh::root@example2.com:backup/data -j /root/.ssh/id_rsa_1 -k /root/.ssh/id_rsa_2` 247 | 248 | + Backup a single filesystem and exclude some datasets: 249 | 250 | `pyznap send -s tank -d backup/tank -e '/tank/data*' '/tank/home/user1*' '*/user2/docs'` 251 | -------------------------------------------------------------------------------- /pyznap/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | pyznap 3 | ~~~~~~~~~~~~~~ 4 | 5 | pyznap - ZFS snapshot tool written in python. 6 | 7 | :copyright: (c) 2018-2020 by Yannick Boetzel. 8 | :license: GPLv3, see LICENSE for more details. 
9 | """ 10 | 11 | 12 | __version__ = '1.6.0' 13 | -------------------------------------------------------------------------------- /pyznap/clean.py: -------------------------------------------------------------------------------- 1 | """ 2 | pyznap.clean 3 | ~~~~~~~~~~~~~~ 4 | 5 | Clean snapshots. 6 | 7 | :copyright: (c) 2018-2019 by Yannick Boetzel. 8 | :license: GPLv3, see LICENSE for more details. 9 | """ 10 | 11 | import logging 12 | from datetime import datetime 13 | from subprocess import CalledProcessError 14 | from .ssh import SSH, SSHException 15 | from .utils import parse_name 16 | import pyznap.pyzfs as zfs 17 | from .process import DatasetBusyError, DatasetNotFoundError 18 | 19 | 20 | def clean_snap(snap): 21 | """Deletes a snapshot 22 | 23 | Parameters 24 | ---------- 25 | snap : {ZFSSnapshot} 26 | Snapshot to destroy 27 | """ 28 | 29 | logger = logging.getLogger(__name__) 30 | 31 | logger.info('Deleting snapshot {}...'.format(snap)) 32 | try: 33 | snap.destroy() 34 | except DatasetBusyError as err: 35 | logger.error(err) 36 | except CalledProcessError as err: 37 | logger.error('Error while deleting snapshot {}: \'{:s}\'...' 38 | .format(snap, err.stderr.rstrip())) 39 | except KeyboardInterrupt: 40 | logger.error('KeyboardInterrupt while cleaning snapshot {}...' 41 | .format(snap)) 42 | raise 43 | 44 | 45 | def clean_filesystem(filesystem, conf): 46 | """Deletes snapshots of a single filesystem according to conf. 
47 | 48 | Parameters: 49 | ---------- 50 | filesystem : {ZFSFilesystem} 51 | Filesystem to clean 52 | conf : {dict} 53 | Config entry with snapshot strategy 54 | """ 55 | 56 | logger = logging.getLogger(__name__) 57 | logger.debug('Cleaning snapshots on {}...'.format(filesystem)) 58 | 59 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 60 | # catch exception if dataset was destroyed since pyznap was started 61 | try: 62 | fs_snapshots = filesystem.snapshots() 63 | except (DatasetNotFoundError, DatasetBusyError) as err: 64 | logger.error('Error while opening {}: {}...'.format(filesystem, err)) 65 | return 1 66 | # categorize snapshots 67 | for snap in fs_snapshots: 68 | # Ignore snapshots not taken with pyznap or sanoid 69 | if not snap.name.split('@')[1].startswith(('pyznap', 'autosnap')): 70 | continue 71 | try: 72 | snap_type = snap.name.split('_')[-1] 73 | snapshots[snap_type].append(snap) 74 | except (ValueError, KeyError): 75 | continue 76 | 77 | # Reverse sort by time taken 78 | for snaps in snapshots.values(): 79 | snaps.reverse() 80 | 81 | for snap in snapshots['yearly'][conf['yearly']:]: 82 | clean_snap(snap) 83 | 84 | for snap in snapshots['monthly'][conf['monthly']:]: 85 | clean_snap(snap) 86 | 87 | for snap in snapshots['weekly'][conf['weekly']:]: 88 | clean_snap(snap) 89 | 90 | for snap in snapshots['daily'][conf['daily']:]: 91 | clean_snap(snap) 92 | 93 | for snap in snapshots['hourly'][conf['hourly']:]: 94 | clean_snap(snap) 95 | 96 | for snap in snapshots['frequent'][conf['frequent']:]: 97 | clean_snap(snap) 98 | 99 | 100 | def clean_config(config): 101 | """Deletes old snapshots according to strategies given in config. Goes through each config, 102 | opens up ssh connection if necessary and then recursively calls clean_filesystem. 
103 | 104 | Parameters: 105 | ---------- 106 | config : {list of dict} 107 | Full config list containing all strategies for different filesystems 108 | """ 109 | 110 | logger = logging.getLogger(__name__) 111 | logger.info('Cleaning snapshots...') 112 | 113 | for conf in config: 114 | if not conf.get('clean', None): 115 | continue 116 | 117 | name = conf['name'] 118 | try: 119 | _type, fsname, user, host, port = parse_name(name) 120 | except ValueError as err: 121 | logger.error('Could not parse {:s}: {}...'.format(name, err)) 122 | continue 123 | 124 | if _type == 'ssh': 125 | try: 126 | ssh = SSH(user, host, port=port, key=conf['key']) 127 | except (FileNotFoundError, SSHException): 128 | continue 129 | name_log = '{:s}@{:s}:{:s}'.format(user, host, fsname) 130 | else: 131 | ssh = None 132 | name_log = fsname 133 | 134 | try: 135 | # Children includes the base filesystem (named 'fsname') 136 | children = zfs.find(path=fsname, types=['filesystem', 'volume'], ssh=ssh) 137 | except DatasetNotFoundError as err: 138 | logger.error('Dataset {:s} does not exist...'.format(name_log)) 139 | continue 140 | except ValueError as err: 141 | logger.error(err) 142 | continue 143 | except CalledProcessError as err: 144 | logger.error('Error while opening {:s}: \'{:s}\'...' 
145 | .format(name_log, err.stderr.rstrip())) 146 | else: 147 | # Clean snapshots of parent filesystem 148 | clean_filesystem(children[0], conf) 149 | # Clean snapshots of all children that don't have a seperate config entry 150 | for child in children[1:]: 151 | # Check if any of the parents (but child of base filesystem) have a config entry 152 | for parent in children[1:]: 153 | if ssh: 154 | child_name = 'ssh:{:d}:{:s}@{:s}:{:s}'.format(port, user, host, child.name) 155 | parent_name = 'ssh:{:d}:{:s}@{:s}:{:s}'.format(port, user, host, parent.name) 156 | else: 157 | child_name = child.name 158 | parent_name = parent.name 159 | # Skip if child has an entry or if any parent entry already in config 160 | child_parent = '/'.join(child_name.split('/')[:-1]) # get parent of child filesystem 161 | if ((child_name == parent_name or child_parent.startswith(parent_name)) and 162 | (parent_name in [entry['name'] for entry in config])): 163 | break 164 | else: 165 | clean_filesystem(child, conf) 166 | finally: 167 | if ssh: 168 | ssh.close() 169 | -------------------------------------------------------------------------------- /pyznap/config/pyznap.conf: -------------------------------------------------------------------------------- 1 | ## Sample config file for pyznap. Save the config under /etc/pyznap/pyznap.conf or use the 2 | ## --config flag. Values get passed down recursively, so if you want a child filesystem to have 3 | ## a different strategy you only need to specify the values that are different from the parent 4 | ## filesystem. For remote syncronisation always keep enough snapshots on the destination. If there 5 | ## are no common snapshots the destination has to be destroyed and a full stream has to be sent. 6 | ## ssh locations are always specified with 'ssh:port:user@host:poolname/filesystem'. 7 | ## Remove the comments at the end of the lines in your config, as they will not be ignored. Only 8 | ## lines starting with '#' will be ignored. 
9 | # 10 | # 11 | # 12 | ## Some example configs: 13 | # 14 | ## Take regular snapshots on a filesystem 15 | # [rpool/filesystem] 16 | # frequent = 4 # Keep 4 frequent snapshots 17 | # hourly = 24 # Keep 24 hourly snapshots 18 | # daily = 7 # Keep 7 daily snapshots 19 | # weekly = 4 # Keep 4 weekly snapshots 20 | # monthly = 6 # Keep 6 monthly snapshots 21 | # yearly = 1 # Keep 1 yearly snapshot 22 | # snap = yes # Take snapshots on this filesystem 23 | # clean = yes # Delete old snapshots on this filesystem 24 | # dest = backup/filesystem # Backup this filesystem on this location 25 | # exclude = rpool/filesystem/data/* # Exclude these datasets for pyznap send 26 | # 27 | # 28 | ## Missing values will be filled automatically if parent is in config 29 | # [rpool/filesystem/data] 30 | # hourly = 12 # Strategy will be hourly=12, daily=7, weekly=4, ... 31 | # dest = data/backup # Send child to another location as well 32 | # 33 | # 34 | ## Backup location 35 | # [backup/filesystem] 36 | # hourly = 48 # Keep more older snapshots in backup location 37 | # snap = no # Don't take new snapshots on backup location 38 | # clean = yes # Delete old snapshots 39 | # 40 | ## You can send backups to a remote location 41 | # [rpool/data] 42 | # hourly = 24 43 | # snap = yes 44 | # clean = yes 45 | # dest = ssh:22:user@host:backup/data # Specify ssh destination 46 | # dest_keys = /home/user/.ssh/id_rsa # Provide key for ssh login. If none given, look in home dir 47 | # compress = gzip # Use gzip compression for sending over ssh 48 | # 49 | ## You can also take snapshots on a remote and pull snapshots from there 50 | # [ssh:22:user@host:rpool/data] 51 | # key = /home/user/.ssh/id_rsa # Provide key for ssh login. If none given, look in home dir 52 | # hourly = 24 53 | # snap = yes 54 | # clean = yes 55 | # dest = tank/data 56 | # compress = lz4 57 | # 58 | ## You can give multiple dest. 
Filesystem will be sent to all of them 59 | # [tank/data] 60 | # hourly = 24 61 | # snap = yes 62 | # clean = yes 63 | # dest = backup/data, ssh::user@host0:backup/data, ssh:22:user@host1:backup/data 64 | # dest_keys = /home/user/.ssh/id_rsa, /home/user/.ssh/id_rsa 65 | # compress = lzop, none 66 | # raw_send = no, yes 67 | # exclude = tank/data/home, */home */media/* */logs 68 | -------------------------------------------------------------------------------- /pyznap/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | pyznap.main 4 | ~~~~~~~~~~~~~~ 5 | 6 | ZFS snapshot tool written in python. 7 | 8 | :copyright: (c) 2018-2019 by Yannick Boetzel. 9 | :license: GPLv3, see LICENSE for more details. 10 | """ 11 | 12 | import sys 13 | import os 14 | import logging 15 | from logging.config import fileConfig 16 | from argparse import ArgumentParser 17 | from datetime import datetime 18 | from .utils import read_config, create_config 19 | from .clean import clean_config 20 | from .take import take_config 21 | from .send import send_config 22 | 23 | 24 | DIRNAME = os.path.dirname(os.path.abspath(__file__)) 25 | CONFIG_DIR = '/etc/pyznap/' 26 | 27 | def _main(): 28 | """pyznap main function. Parses arguments and calls snap/clean/send functions accordingly. 29 | 30 | Returns 31 | ------- 32 | int 33 | Exit code 34 | """ 35 | 36 | parser = ArgumentParser(prog='pyznap', description='ZFS snapshot tool written in python') 37 | parser.add_argument('-v', '--verbose', action="store_true", 38 | dest="verbose", help='print more verbose output') 39 | parser.add_argument('--config', action="store", 40 | dest="config", help='path to config file') 41 | subparsers = parser.add_subparsers(dest='command') 42 | 43 | parser_setup = subparsers.add_parser('setup', help='initial setup') 44 | parser_setup.add_argument('-p', '--path', action='store', 45 | dest='path', help='pyznap config dir. 
default is {:s}'.format(CONFIG_DIR)) 46 | 47 | parser_snap = subparsers.add_parser('snap', help='zfs snapshot tools') 48 | parser_snap.add_argument('--take', action="store_true", 49 | help='take snapshots according to config file') 50 | parser_snap.add_argument('--clean', action="store_true", 51 | help='clean old snapshots according to config file') 52 | parser_snap.add_argument('--full', action="store_true", 53 | help='take snapshots then clean old according to config file') 54 | 55 | parser_send = subparsers.add_parser('send', help='zfs send/receive tools') 56 | parser_send.add_argument('-s', '--source', action="store", 57 | dest='source', help='source filesystem') 58 | parser_send.add_argument('-d', '--dest', action="store", 59 | dest='dest', help='destination filesystem') 60 | parser_send.add_argument('-i', '--key', action="store", 61 | dest='key', help='ssh key if only source or dest is remote') 62 | parser_send.add_argument('-j', '--source-key', action="store", 63 | dest='source_key', help='ssh key for source if both are remote') 64 | parser_send.add_argument('-k', '--dest-key', action="store", 65 | dest='dest_key', help='ssh key for dest if both are remote') 66 | parser_send.add_argument('-c', '--compress', action="store", 67 | dest='compress', help='compression to use for ssh transfer. default is lzop') 68 | parser_send.add_argument('-e', '--exclude', nargs = '+', 69 | dest='exclude', help='datasets to exclude') 70 | parser_send.add_argument('-w', '--raw', action="store_true", 71 | dest='raw', help='raw zfs send. default is false') 72 | parser_send.add_argument('-r', '--resume', action="store_true", 73 | dest='resume', help='resumable send. default is false') 74 | parser_send.add_argument('--dest-auto-create', action="store_true", 75 | dest='dest_auto_create', 76 | help='create destination if it does not exist. 
default is false') 77 | parser_send.add_argument('--retries', action="store", type=int, 78 | dest='retries', default=0, 79 | help='number of retries on error. default is 0') 80 | parser_send.add_argument('--retry-interval', action="store", type=int, 81 | dest='retry_interval', default=10, 82 | help='interval in seconds between retries. default is 10') 83 | 84 | if len(sys.argv)==1: 85 | parser.print_help(sys.stderr) 86 | sys.exit(1) 87 | args = parser.parse_args() 88 | 89 | loglevel = logging.DEBUG if args.verbose else logging.INFO 90 | logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s: %(message)s', 91 | datefmt='%b %d %H:%M:%S', stream=sys.stdout) 92 | logger = logging.getLogger(__name__) 93 | 94 | logger.info('Starting pyznap...') 95 | 96 | if args.command in ('snap', 'send'): 97 | config_path = args.config if args.config else os.path.join(CONFIG_DIR, 'pyznap.conf') 98 | config = read_config(config_path) 99 | if config == None: 100 | return 1 101 | 102 | if args.command == 'setup': 103 | path = args.path if args.path else CONFIG_DIR 104 | create_config(path) 105 | 106 | elif args.command == 'snap': 107 | # Default if no args are given 108 | if not args.take and not args.clean: 109 | args.full = True 110 | 111 | if args.take or args.full: 112 | take_config(config) 113 | 114 | if args.clean or args.full: 115 | clean_config(config) 116 | 117 | elif args.command == 'send': 118 | if args.source and args.dest: 119 | # use args.key if either source or dest is remote 120 | source_key, dest_key = None, None 121 | if args.dest.startswith('ssh'): 122 | dest_key = [args.key] if args.key else None 123 | elif args.source.startswith('ssh'): 124 | source_key = args.key if args.key else None 125 | # if source_key and dest_key are given, overwrite previous value 126 | source_key = args.source_key if args.source_key else source_key 127 | dest_key = [args.dest_key] if args.dest_key else dest_key 128 | # get exclude rules 129 | exclude = [args.exclude] if 
def main():
    """Entry point wrapping :func:`_main` to handle Ctrl-C.

    Returns
    -------
    int
        Exit code (0 on success, 1 on keyboard interrupt)
    """

    log = logging.getLogger(__name__)
    try:
        exit_code = _main()
    except KeyboardInterrupt:
        log.error('KeyboardInterrupt - exiting gracefully...\n')
        exit_code = 1
    return exit_code


if __name__ == "__main__":
    sys.exit(main())
PIPE = sp.PIPE


class ZFSError(OSError):
    """Base class for errors reported by zfs subprocesses.

    Subclasses define class attributes `errno` and `strerror`; the failing
    dataset name is passed through as the OSError `filename`.
    """
    def __init__(self, dataset):
        super(ZFSError, self).__init__(self.errno, self.strerror, dataset)

class DatasetNotFoundError(ZFSError):
    errno = _errno.ENOENT
    strerror = 'dataset does not exist'

class DatasetExistsError(ZFSError):
    errno = _errno.EEXIST
    strerror = 'dataset already exists'

class DatasetBusyError(ZFSError):
    errno = _errno.EBUSY
    strerror = 'dataset is busy'

class HoldTagNotFoundError(ZFSError):
    errno = _errno.ENOENT
    strerror = 'no such tag on this dataset'

class HoldTagExistsError(ZFSError):
    errno = _errno.EEXIST
    strerror = 'tag already exists on this dataset'

class CompletedProcess(sp.CompletedProcess):
    def check_returncode(self):
        """Check stderr for known zfs errors of the form "cannot <action> <dataset>: <reason>"

        Raises
        ------
        DatasetNotFoundError, DatasetExistsError, DatasetBusyError, HoldTagNotFoundError, HoldTagExistsError
            Raises corresponding ZFS error
        subprocess.CalledProcessError
            For any other non-zero return code (deferred to superclass)
        """

        # Guard against empty or missing stderr: previously splitlines()[0]
        # raised IndexError (or AttributeError on None) instead of the
        # intended CalledProcessError when zfs produced no error output.
        if self.returncode == 1 and self.stderr:
            pattern = r"^cannot ([^ ]+(?: [^ ]+)*?) ([^ ]+): (.+)$"
            # only use first line of stderr to match zfs errors
            match = re.search(pattern, self.stderr.splitlines()[0])
            if match:
                _, dataset, reason = match.groups()
                # strip surrounding single quotes from the dataset name
                if dataset[0] == dataset[-1] == "'":
                    dataset = dataset[1:-1]
                for error in (DatasetNotFoundError,
                              DatasetExistsError,
                              DatasetBusyError,
                              HoldTagNotFoundError,
                              HoldTagExistsError):
                    if reason == error.strerror:
                        raise error(dataset)

        # did not match known errors, defer to superclass
        super(CompletedProcess, self).check_returncode()


def check_output(*popenargs, timeout=None, ssh=None, **kwargs):
    """Run command with arguments and return its output. Works over ssh.

    Parameters:
    ----------
    *popenargs : {}
        Variable length argument list, same as Popen constructor
    **kwargs : {}
        Arbitrary keyword arguments, same as Popen constructor
    timeout : {float}, optional
        Timeout in seconds, if process takes too long TimeoutExpired will be raised (the default is
        None, meaning no timeout)
    ssh : {ssh.SSH}, optional
        Open ssh connection for remote execution (the default is None)

    Raises
    ------
    ValueError
        Raise ValueError for forbidden kwargs

    Returns
    -------
    None or list of lists
        List of all lines from the output, separated at '\\t' into lists
    """

    # these kwargs are set below, so the caller must not supply them
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    if 'universal_newlines' in kwargs:
        raise ValueError('universal_newlines argument not allowed, it will be overridden.')
    if 'input' in kwargs:
        raise ValueError('input argument not allowed, it will be overridden.')

    ret = run(*popenargs, stdout=PIPE, stderr=PIPE, timeout=timeout,
              universal_newlines=True, ssh=ssh, **kwargs)
    # raises a specific ZFSError subclass for known zfs failures
    ret.check_returncode()
    out = ret.stdout

    return None if out is None else [line.split('\t') for line in out.splitlines()]


def run(*popenargs, timeout=None, check=False, ssh=None, **kwargs):
    """Run command with ZFS arguments and return a CompletedProcess instance. Works over ssh.

    Parameters:
    ----------
    *popenargs : {}
        Variable length argument list, same as Popen constructor
    **kwargs : {}
        Arbitrary keyword arguments, same as Popen constructor
    timeout : {float}, optional
        Timeout in seconds, if process takes too long TimeoutExpired will be raised (the default is
        None, meaning no timeout)
    check : {bool}, optional
        Check return code (the default is False, meaning return code will not be checked)
    ssh : {ssh.SSH}, optional
        Open ssh connection for remote execution (the default is None)

    Raises
    ------
    sp.TimeoutExpired
        Raised if process takes longer than given timeout
    sp.CalledProcessError
        Raised if check=True and return code != 0

    Returns
    -------
    CompletedProcess
        Return instance of CompletedProcess with given return code, stdout and stderr
    """

    if ssh:
        # prepend the ssh invocation so the command is executed remotely
        popenargs = (ssh.cmd + popenargs[0], *popenargs[1:])

    with sp.Popen(*popenargs, **kwargs) as process:
        try:
            stdout, stderr = process.communicate(timeout=timeout)
        except sp.TimeoutExpired:
            process.kill()
            stdout, stderr = process.communicate()
            raise sp.TimeoutExpired(process.args, timeout, output=stdout, stderr=stderr)
        except BaseException:
            # explicit BaseException instead of bare except: still propagates
            # KeyboardInterrupt/SystemExit after cleaning up the child
            process.kill()
            process.wait()
            raise
        retcode = process.poll()

    if check and retcode:
        raise sp.CalledProcessError(retcode, popenargs[0], output=stdout, stderr=stderr)

    return CompletedProcess(popenargs[0], retcode, stdout, stderr)
""" 2 | pyznap.pyzfs 3 | ~~~~~~~~~~~~~~ 4 | 5 | Python ZFS bindings, forked from https://bitbucket.org/stevedrake/weir/. 6 | 7 | :copyright: (c) 2015-2019 by Stephen Drake, Yannick Boetzel. 8 | :license: GPLv3, see LICENSE for more details. 9 | """ 10 | 11 | 12 | import sys 13 | import logging 14 | import subprocess as sp 15 | from shlex import quote 16 | from .process import check_output, DatasetNotFoundError, DatasetBusyError 17 | from .utils import exists 18 | 19 | 20 | SHELL = ['sh', '-c'] 21 | 22 | # Use mbuffer if installed on the system 23 | if exists('mbuffer'): 24 | MBUFFER = lambda mem: ['mbuffer', '-q', '-s', '128K', '-m', '{:d}M'.format(mem)] 25 | else: 26 | MBUFFER = None 27 | 28 | # Use pv if installed on the system 29 | if exists('pv'): 30 | PV = lambda size: ['pv', '-f', '-w', '100', '-s', str(size)] 31 | else: 32 | PV = None 33 | 34 | 35 | def find(path=None, ssh=None, max_depth=None, types=[]): 36 | """Lists filesystems and snapshots for a given path""" 37 | cmd = ['zfs', 'list'] 38 | 39 | cmd.append('-H') 40 | 41 | if max_depth is None: 42 | cmd.append('-r') 43 | elif max_depth >= 0: 44 | cmd.append('-d') 45 | cmd.append(str(max_depth)) 46 | else: 47 | raise TypeError('max_depth must be a non-negative int or None') 48 | 49 | if types: 50 | cmd.append('-t') 51 | cmd.append(','.join(types)) 52 | 53 | cmd.append('-o') 54 | cmd.append('name,type') 55 | 56 | if path: 57 | cmd.append(path) 58 | 59 | out = check_output(cmd, ssh=ssh) 60 | 61 | return [open(name, ssh=ssh, type=type) for name, type in out] 62 | 63 | 64 | def findprops(path=None, ssh=None, max_depth=None, props=['all'], sources=[], types=[]): 65 | """Lists all properties of a given filesystem""" 66 | cmd = ['zfs', 'get'] 67 | 68 | cmd.append('-H') 69 | cmd.append('-p') 70 | 71 | if max_depth is None: 72 | cmd.append('-r') 73 | elif max_depth >= 0: 74 | cmd.append('-d') 75 | cmd.append(str(max_depth)) 76 | else: 77 | raise TypeError('max_depth must be a non-negative int or None') 78 | 79 | 
if types: 80 | cmd.append('-t') 81 | cmd.append(','.join(types)) 82 | 83 | if sources: 84 | cmd.append('-s') 85 | cmd.append(','.join(sources)) 86 | 87 | cmd.append(','.join(props)) 88 | 89 | if path: 90 | cmd.append(path) 91 | 92 | out = check_output(cmd, ssh=ssh) 93 | 94 | names = set(map(lambda x: x[0], out)) 95 | 96 | # return [dict(name=n, property=p, value=v, source=s) for n, p, v, s in out] 97 | return {name: {i[1]: (i[2], i[3]) for i in out if i[0] == name} for name in names} 98 | 99 | 100 | # Factory function for dataset objects 101 | def open(name, ssh=None, type=None): 102 | """Opens a volume, filesystem or snapshot""" 103 | if type is None: 104 | type = findprops(name, ssh=ssh, max_depth=0, props=['type'])[name]['type'][0] 105 | 106 | if type == 'volume': 107 | return ZFSVolume(name, ssh) 108 | 109 | if type == 'filesystem': 110 | return ZFSFilesystem(name, ssh) 111 | 112 | if type == 'snapshot': 113 | return ZFSSnapshot(name, ssh) 114 | 115 | raise ValueError('invalid dataset type %s' % type) 116 | 117 | 118 | def roots(ssh=None): 119 | return find(ssh=ssh, max_depth=0) 120 | 121 | # note: force means create missing parent filesystems 122 | def create(name, ssh=None, type='filesystem', props={}, force=False): 123 | cmd = ['zfs', 'create'] 124 | 125 | if type == 'volume': 126 | raise NotImplementedError() 127 | elif type != 'filesystem': 128 | raise ValueError('invalid type %s' % type) 129 | 130 | if force: 131 | cmd.append('-p') 132 | 133 | for prop, value in props.items(): 134 | cmd.append('-o') 135 | cmd.append(prop + '=' + str(value)) 136 | 137 | cmd.append(name) 138 | 139 | check_output(cmd, ssh=ssh) 140 | 141 | return ZFSFilesystem(name, ssh=ssh) 142 | 143 | 144 | def receive(name, stdin, ssh=None, ssh_source=None, append_name=False, append_path=False, 145 | force=False, nomount=False, stream_size=0, raw=False, resume=False): 146 | """Returns Popen instance for zfs receive""" 147 | logger = logging.getLogger(__name__) 148 | 149 | # use minimal 
def receive(name, stdin, ssh=None, ssh_source=None, append_name=False, append_path=False,
            force=False, nomount=False, stream_size=0, raw=False, resume=False):
    """Returns Popen instance for zfs receive.

    Parameters:
    ----------
    name : {str}
        Dataset to receive into
    stdin : {file object}
        Stdout of the corresponding 'zfs send' process
    ssh : {ssh.SSH}, optional
        Open ssh connection if dest is remote (the default is None)
    ssh_source : {ssh.SSH}, optional
        Ssh connection of the source side, used to match decompression (the default is None)
    append_name, append_path : {bool}, optional
        Pass '-e' resp. '-d' to 'zfs receive'
    force : {bool}, optional
        Pass '-F' (rollback dest before receive)
    nomount : {bool}, optional
        Pass '-u' (do not mount the received filesystem)
    stream_size : {int}, optional
        Estimated stream size in bytes, used to size mbuffer
    raw : {bool}, optional
        Raw send in progress; disables decompression
    resume : {bool}, optional
        Pass '-s' (keep partial state for resumable receive)

    Returns
    -------
    subprocess.Popen
        The running 'zfs receive' pipeline
    """
    logger = logging.getLogger(__name__)

    # use minimal mbuffer size of 1 and maximal size of 512 (256 over ssh)
    mbuff_size = min(max(stream_size // 1024**2, 1), 256 if (ssh_source or ssh) else 512)

    # choose shell (sh or ssh) and mbuffer command on local / remote
    if ssh:
        shell = ssh.cmd
        mbuffer = ssh.mbuffer
    else:
        shell = SHELL
        mbuffer = MBUFFER

    # only decompress if send is over ssh; both ends must use the same method
    if ssh_source and ssh:
        decompress = ssh_source.decompress if ssh_source.decompress == ssh.decompress else None
    elif ssh_source or ssh:
        decompress = ssh_source.decompress if ssh_source else ssh.decompress
    else:
        decompress = None

    # construct zfs receive command
    cmd = ['zfs', 'receive']

    if append_name:
        cmd.append('-e')
    elif append_path:
        cmd.append('-d')

    if force:
        cmd.append('-F')
    if nomount:
        cmd.append('-u')
    if resume:
        cmd.append('-s')

    cmd.append(quote(name))  # use shlex to quote the name

    # add additional commands, prepended as pipeline stages
    if decompress and not raw:  # disable compression for raw send
        logger.debug("Using compression on dest: '{:s}'...".format(' '.join(decompress)))
        cmd = decompress + ['|'] + cmd
    # only use mbuffer at recv if send is over ssh
    if (ssh_source or ssh) and mbuffer and stream_size >= 1024**2:  # don't use mbuffer if stream size is too small
        logger.debug("Using mbuffer on dest: '{:s}'...".format(' '.join(mbuffer(mbuff_size))))
        cmd = mbuffer(mbuff_size) + ['|'] + cmd

    # execute command with shell (sh or ssh)
    cmd = shell + [' '.join(cmd)]

    return sp.Popen(cmd, stdin=stdin, stderr=sp.PIPE)  # zfs receive process


class ZFSDataset(object):
    """Base class for zfs datasets (filesystems, volumes and snapshots)."""

    def __init__(self, name, ssh=None):
        # name: full dataset path, e.g. 'tank/data'; ssh: open connection or None for local
        self.name = name
        self.ssh = ssh

    def __str__(self):
        # remote datasets are prefixed with 'user@host:'
        return '{:s}@{:s}:{:s}'.format(self.ssh.user, self.ssh.host, self.name) if self.ssh else self.name

    def __repr__(self):
        name = self.__str__()
        return '{0}({1!r})'.format(self.__class__.__name__, name)

    def parent(self):
        """Opens the parent dataset, or returns None for a pool root."""
        parent_name, _, _ = self.name.rpartition('/')
        return open(parent_name, ssh=self.ssh) if parent_name else None

    def filesystems(self):
        """Direct child filesystems, excluding this dataset itself ([1:])."""
        return find(self.name, ssh=self.ssh, max_depth=1, types=['filesystem'])[1:]

    def snapshots(self):
        """Snapshots of this dataset."""
        return find(self.name, ssh=self.ssh, max_depth=1, types=['snapshot'])

    def children(self):
        """All direct children, excluding this dataset itself ([1:])."""
        return find(self.name, ssh=self.ssh, max_depth=1, types=['all'])[1:]

    def clones(self):
        raise NotImplementedError()

    def dependents(self):
        raise NotImplementedError()

    # TODO: split force to allow -f, -r and -R to be specified individually
    # TODO: remove or ignore defer option for non-snapshot datasets
    def destroy(self, defer=False, force=False):
        """Destroys the dataset ('zfs destroy -v')."""
        cmd = ['zfs', 'destroy', '-v']

        if defer:
            cmd.append('-d')

        if force:
            cmd.append('-f')
            cmd.append('-R')

        cmd.append(self.name)

        check_output(cmd, ssh=self.ssh)

    def snapshot(self, snapname, recursive=False, props=None):
        """Takes a snapshot '<name>@<snapname>' and returns it.

        `props` default changed from a shared mutable {} to None (same behavior).
        """
        props = props if props is not None else {}

        cmd = ['zfs', 'snapshot']

        if recursive:
            cmd.append('-r')

        for prop, value in props.items():
            cmd.append('-o')
            cmd.append(prop + '=' + str(value))

        name = self.name + '@' + snapname
        cmd.append(name)

        check_output(cmd, ssh=self.ssh)
        return ZFSSnapshot(name, ssh=self.ssh)

    def receive_abort(self):
        """Aborts the resumeable receive state ('zfs receive -A')."""
        check_output(['zfs', 'receive', '-A', self.name], ssh=self.ssh)

    # TODO: split force to allow -f, -r and -R to be specified individually
    def rollback(self, snapname, force=False):
        raise NotImplementedError()

    def promote(self):
        raise NotImplementedError()

    # TODO: split force to allow -f and -p to be specified individually
    def rename(self, name, recursive=False, force=False):
        raise NotImplementedError()

    def getprops(self):
        """All properties of this dataset as {prop: (value, source)}."""
        return findprops(self.name, ssh=self.ssh, max_depth=0)[self.name]

    def getprop(self, prop):
        """A single property as a (value, source) tuple, or None if absent."""
        return findprops(self.name, ssh=self.ssh, max_depth=0, props=[prop])[self.name].get(prop, None)

    def getpropval(self, prop, default=None):
        """The value of a property, or `default` if unset ('-') or absent.

        Bug fix: getprop() returns a (value, source) tuple (see findprops and
        the `getprops().get(...)[0]` usage in send.py), so the previous
        `self.getprop(prop)['value']` always raised TypeError. Index [0]
        instead, and fall back to `default` when the property is missing.
        """
        prop_tuple = self.getprop(prop)
        if prop_tuple is None:
            return default
        value = prop_tuple[0]
        return default if value == '-' else value

    def setprop(self, prop, value):
        """Sets a property ('zfs set prop=value')."""
        cmd = ['zfs', 'set']

        cmd.append(prop + '=' + str(value))
        cmd.append(self.name)

        check_output(cmd, ssh=self.ssh)

    def delprop(self, prop, recursive=False):
        """Resets a property to its inherited value ('zfs inherit')."""
        cmd = ['zfs', 'inherit']

        if recursive:
            cmd.append('-r')

        cmd.append(prop)
        cmd.append(self.name)

        check_output(cmd, ssh=self.ssh)

    def userspace(self, *args, **kwargs):
        raise NotImplementedError()

    def groupspace(self, *args, **kwargs):
        raise NotImplementedError()

    def share(self, *args, **kwargs):
        raise NotImplementedError()

    def unshare(self, *args, **kwargs):
        raise NotImplementedError()

    def allow(self, *args, **kwargs):
        raise NotImplementedError()

    def unallow(self, *args, **kwargs):
        raise NotImplementedError()

class ZFSVolume(ZFSDataset):
    """A zfs volume (zvol)."""
    pass

class ZFSFilesystem(ZFSDataset):
    """A zfs filesystem."""
    def upgrade(self, *args, **kwargs):
        raise NotImplementedError()

    def mount(self, *args, **kwargs):
        raise NotImplementedError()

    def unmount(self, *args, **kwargs):
        raise NotImplementedError()
class ZFSSnapshot(ZFSDataset):
    """A zfs snapshot ('<filesystem>@<snapname>')."""

    def snapname(self):
        """Returns the part of the name after '@'."""
        return self.name.split('@')[-1]

    def parent(self):
        """Opens the filesystem/volume this snapshot was taken from."""
        fs_path = self.name.split('@')[0]
        return open(name=fs_path, ssh=self.ssh)

    # note: force means create missing parent filesystems
    def clone(self, name, props={}, force=False):
        raise NotImplementedError()

    def send(self, ssh_dest=None, base=None, intermediates=False, replicate=False,
             properties=False, deduplicate=False, raw=False, resume_token=None):
        """Starts 'zfs send' for this snapshot and returns its Popen instance.

        The command is assembled into a single shell pipeline: optional
        mbuffer and pv stages follow the send, and compression is appended
        last when the stream crosses ssh.
        """
        log = logging.getLogger(__name__)

        # the size estimate drives mbuffer sizing and pv progress display
        size = self.stream_size(base=base, raw=raw, resume_token=resume_token)
        # use minimal mbuffer size of 1 and maximal size of 512 (256 over ssh)
        buf_mb = min(max(size // 1024**2, 1), 256 if (self.ssh or ssh_dest) else 512)

        # choose shell (sh or ssh) and the mbuffer/pv helpers for local/remote
        if self.ssh:
            shell, mbuffer, pv = self.ssh.cmd, self.ssh.mbuffer, self.ssh.pv
        else:
            shell, mbuffer, pv = SHELL, MBUFFER, PV

        # only compress if send is over ssh and both ends agree on the method
        if self.ssh and ssh_dest:
            compress = self.ssh.compress if self.ssh.compress == ssh_dest.compress else None
        elif self.ssh or ssh_dest:
            compress = self.ssh.compress if self.ssh else ssh_dest.compress
        else:
            compress = None

        cmd = ['zfs', 'send']

        if resume_token is not None:
            cmd += ['-t', resume_token]
        else:  # normal (non-resumed) send
            if replicate:
                cmd.append('-R')
            if properties:
                cmd.append('-p')
            if deduplicate:
                cmd.append('-D')
            if raw:
                log.debug("Using raw zfs send...")
                cmd.append('-w')

            if base is not None:
                cmd.append('-I' if intermediates else '-i')
                cmd.append(quote(base.name))  # use shlex to quote the name

        cmd.append(quote(self.name))  # use shlex to quote the name

        # skip mbuffer/pv entirely for streams below 1MB
        if mbuffer and size >= 1024**2:
            log.debug("Using mbuffer on source: '{:s}'...".format(' '.join(mbuffer(buf_mb))))
            cmd += ['|'] + mbuffer(buf_mb)

        if pv and size >= 1024**2:
            pv_pipe = pv(size)
            if not sys.stdout.isatty():
                pv_pipe += ['-D', '60', '-i', '60']  # redirected stdout: update pv once per minute
            log.debug("Using pv on source: '{:s}'...".format(' '.join(pv_pipe)))
            cmd += ['|'] + pv_pipe

        if compress and not raw:  # disable compression for raw send
            log.debug("Using compression on source: '{:s}'...".format(' '.join(compress)))
            cmd += ['|'] + compress

        # execute the whole pipeline with the shell (sh or ssh)
        return sp.Popen(shell + [' '.join(cmd)], stdout=sp.PIPE, stderr=sp.PIPE)

    def stream_size(self, base=None, raw=False, resume_token=None):
        """Estimates the send stream size in bytes via 'zfs send -nvP'.

        Results are cached per (base, raw, resume_token); 0 is returned on
        any error or unparsable output.
        """
        key = (str(base), raw, resume_token)
        if not hasattr(self, 'stream_cache'):
            self.stream_cache = {}
        elif key in self.stream_cache:
            return self.stream_cache[key]
        else:
            self.stream_cache[key] = 0

        cmd = ['zfs', 'send', '-nvP']

        if raw:
            cmd.append('-w')

        if resume_token is not None:
            cmd += ['-t', resume_token]
        elif base is not None:
            cmd += ['-I', base.name]

        cmd.append(self.name)

        try:
            out = check_output(cmd, ssh=self.ssh)
        except (DatasetNotFoundError, DatasetBusyError, sp.CalledProcessError):
            return 0

        try:
            # the last space-separated field of the last line is the estimate
            size = int(out[-1][-1].split(' ')[-1])
        except (IndexError, ValueError):
            return 0
        self.stream_cache[key] = size
        return size

    def hold(self, tag, recursive=False):
        """Places a hold with the given tag on this snapshot."""
        cmd = ['zfs', 'hold']

        if recursive:
            cmd.append('-r')

        cmd += [tag, self.name]

        check_output(cmd, ssh=self.ssh)

    def holds(self):
        """Returns the hold tag names on this snapshot."""
        out = check_output(['zfs', 'holds', '-H', self.name], ssh=self.ssh)

        # second column of 'zfs holds -H' output is the tag name
        return [entry[1] for entry in out]

    def release(self, tag, recursive=False):
        """Releases the hold with the given tag from this snapshot."""
        cmd = ['zfs', 'release']

        if recursive:
            cmd.append('-r')

        cmd += [tag, self.name]

        check_output(cmd, ssh=self.ssh)
def send_snap(snapshot, dest_name, base=None, ssh_dest=None, raw=False, resume=False, resume_token=None):
    """Sends snapshot to destination, incrementally and over ssh if specified.

    Parameters:
    ----------
    snapshot : {ZFSSnapshot}
        Snapshot to send
    dest_name : {str}
        Name of the location to send snapshot
    base : {ZFSSnapshot}, optional
        Base snapshot for incremental stream (the default is None, meaning a full stream)
    ssh_dest : {ssh.SSH}, optional
        Open ssh connection for remote backup (the default is None, meaning local backup)
    raw : {bool}, optional
        Use raw zfs send (the default is False)
    resume : {bool}, optional
        Make the receive side resumable (the default is False)
    resume_token : {str}, optional
        Token to resume a previously interrupted transfer (the default is None)

    Returns
    -------
    int
        0 if success, 1 if not, 2 if CalledProcessError
    """

    logger = logging.getLogger(__name__)
    if ssh_dest:
        dest_name_log = '{:s}@{:s}:{:s}'.format(ssh_dest.user, ssh_dest.host, dest_name)
    else:
        dest_name_log = dest_name

    try:
        ssh_source = snapshot.ssh
        stream_size = snapshot.stream_size(base=base, raw=raw, resume_token=resume_token)

        # wire 'zfs send' stdout into 'zfs receive' stdin
        send = snapshot.send(ssh_dest=ssh_dest, base=base, intermediates=True, raw=raw,
                             resume_token=resume_token)
        recv = zfs.receive(name=dest_name, stdin=send.stdout, ssh=ssh_dest, ssh_source=ssh_source,
                           force=True, nomount=True, stream_size=stream_size, raw=raw, resume=resume)
        send.stdout.close()

        # mirror pv's progress (on send's stderr) to our own stderr/stdout
        for line in TextIOWrapper(send.stderr, newline='\r'):
            if sys.stdout.isatty():
                sys.stderr.write(' ' + line)
                sys.stderr.flush()
            elif line.rstrip():  # if stdout is redirected, write pv to stdout
                sys.stdout.write(' ' + line.rstrip() + '\n')
                sys.stdout.flush()
        send.stderr.close()

        recv_out, recv_err = recv.communicate()
        # raise any error that occured during receive
        if recv.returncode:
            raise CalledProcessError(returncode=recv.returncode, cmd=recv.args,
                                     output=recv_out, stderr=recv_err)

    except (DatasetNotFoundError, DatasetExistsError, DatasetBusyError, OSError, EOFError) as err:
        logger.error('Error while sending to {:s}: {}...'.format(dest_name_log, err))
        return 1
    except CalledProcessError as err:
        logger.error('Error while sending to {:s}: {}...'.format(dest_name_log, err.stderr.rstrip().decode().replace('\n', ' - ')))
        # returncode 2 means we will retry send if requested
        return 2
    except KeyboardInterrupt:
        logger.error('KeyboardInterrupt while sending to {:s}...'.format(dest_name_log))
        raise
    else:
        return 0
def send_filesystem(source_fs, dest_name, ssh_dest=None, raw=False, resume=False):
    """Checks for common snapshots between source and dest.
    If none are found, send the oldest snapshot, then update with the most recent one.
    If there are common snaps, update destination with the most recent one.

    Parameters:
    ----------
    source_fs : {ZFSFilesystem}
        Source zfs filesystem from where to send
    dest_name : {str}
        Name of the location to send to
    ssh_dest : {ssh.SSH}, optional
        Open ssh connection for remote backup (the default is None, meaning local backup)
    raw : {bool}, optional
        Use raw zfs send (the default is False)
    resume : {bool}, optional
        Use resumable send/receive (the default is False)

    Returns
    -------
    int
        0 if success, 1 if not, 2 for ssh errors
    """

    logger = logging.getLogger(__name__)
    dest_name_log = '{:s}@{:s}:{:s}'.format(ssh_dest.user, ssh_dest.host, dest_name) if ssh_dest else dest_name

    logger.debug('Sending {} to {:s}...'.format(source_fs, dest_name_log))

    resume_token = None
    # Check if dest already has a 'zfs receive' ongoing
    if check_recv(dest_name, ssh=ssh_dest):
        return 1

    # get snapshots on source, catch exception if dataset was destroyed since pyznap was started
    # [::-1] reverses 'zfs list' order so snapshots[0] is the most recent
    try:
        snapshots = source_fs.snapshots()[::-1]
    except (DatasetNotFoundError, DatasetBusyError) as err:
        logger.error('Error while opening source {}: {}...'.format(source_fs, err))
        return 1
    except CalledProcessError as err:
        # distinguish ssh transport failures (retryable, rc 2) from zfs errors (rc 1)
        message = err.stderr.rstrip()
        if message.startswith('ssh: '):
            logger.error('Connection issue while opening source {}: \'{:s}\'...'
                         .format(source_fs, message))
            return 2
        else:
            logger.error('Error while opening source {}: \'{:s}\'...'
                         .format(source_fs, message))
            return 1
    snapnames = [snap.name.split('@')[1] for snap in snapshots]

    try:
        snapshot = snapshots[0]     # Most recent snapshot
        base = snapshots[-1]        # Oldest snapshot
    except IndexError:
        logger.error('No snapshots on {}, cannot send...'.format(source_fs))
        return 1

    try:
        dest_fs = zfs.open(dest_name, ssh=ssh_dest)
    except DatasetNotFoundError:
        # dest does not exist yet: full send of the oldest snapshot below
        dest_snapnames = []
        common = set()
    except CalledProcessError as err:
        message = err.stderr.rstrip()
        if message.startswith('ssh: '):
            logger.error('Connection issue while opening dest {:s}: \'{:s}\'...'
                         .format(dest_name_log, message))
            return 2
        else:
            logger.error('Error while opening dest {:s}: \'{:s}\'...'
                         .format(dest_name_log, message))
            return 1
    else:
        # if dest exists, check for resume token
        # getprops() values are (value, source) tuples, hence the [0]
        resume_token = dest_fs.getprops().get('receive_resume_token', (None, None))[0]
        # find common snapshots between source & dest
        dest_snapnames = [snap.name.split('@')[1] for snap in dest_fs.snapshots()]
        common = set(snapnames) & set(dest_snapnames)

    # if not resume and resume_token is not None:
    #     if not abort:
    #         logger.error('{:s} contains partially-complete state from "zfs receive -s" (~{:s}), '
    #                      'but neither resume nor abort option is given...'
    #                      .format(dest_name_log, bytes_fmt(base.stream_size(raw=raw, resume_token=resume_token))))
    #         return 1
    #     else:
    #         logger.info('{:s} contains partially-complete state from "zfs receive -s" (~{:s}), '
    #                     'will abort it...'
    #                     .format(dest_name_log, bytes_fmt(base.stream_size(raw=raw, resume_token=resume_token))))
    #         if abort_resume(dest_fs):
    #             return 1

    if resume_token is not None:
        logger.info('Found resume token. Resuming last transfer of {:s} (~{:s})...'
                    .format(dest_name_log, bytes_fmt(base.stream_size(raw=raw, resume_token=resume_token))))
        rc = send_snap(base, dest_name, base=None, ssh_dest=ssh_dest, raw=raw, resume=True, resume_token=resume_token)
        if rc:
            return rc
        # we need to update common snapshots after finishing the resumable send
        dest_snapnames = [snap.name.split('@')[1] for snap in dest_fs.snapshots()]
        common = set(snapnames) & set(dest_snapnames)

    if not common:
        if dest_snapnames:
            # refuse to overwrite a dest that has unrelated snapshots
            logger.error('No common snapshots on {:s}, but snapshots exist. Not sending...'
                         .format(dest_name_log))
            return 1
        else:
            logger.info('No common snapshots on {:s}, sending oldest snapshot {} (~{:s})...'
                        .format(dest_name_log, base, bytes_fmt(base.stream_size(raw=raw))))
            rc = send_snap(base, dest_name, base=None, ssh_dest=ssh_dest, raw=raw, resume=resume)
            if rc:
                return rc
    else:
        # If there are common snapshots, get the most recent one as the new base
        base = next(filter(lambda x: x.name.split('@')[1] in common, snapshots), None)

    # incremental update from base (oldest-just-sent or newest common) to the
    # most recent source snapshot
    if base.name != snapshot.name:
        logger.info('Updating {:s} with recent snapshot {} (~{:s})...'
                    .format(dest_name_log, snapshot, bytes_fmt(snapshot.stream_size(base, raw=raw))))
        rc = send_snap(snapshot, dest_name, base=base, ssh_dest=ssh_dest, raw=raw, resume=resume)
        if rc:
            return rc

    logger.info('{:s} is up to date...'.format(dest_name_log))
    return 0
def send_config(config):
    """Tries to sync all entries in the config to their dest. Finds all children of the filesystem
    and calls send_filesystem on each of them.

    Parameters:
    ----------
    config : {list of dict}
        Full config list containing all strategies for different filesystems
    """

    logger = logging.getLogger(__name__)
    logger.info('Sending snapshots...')

    for conf in config:
        if not conf.get('dest', None):
            continue

        backup_source = conf['name']
        try:
            _type, source_name, user, host, port = parse_name(backup_source)
        except ValueError as err:
            logger.error('Could not parse {:s}: {}...'.format(backup_source, err))
            continue

        # if source is remote, open ssh connection
        if _type == 'ssh':
            key = conf['key'] if conf.get('key', None) else None
            compress = conf['compress'].pop(0) if conf.get('compress', None) else 'lzop'
            try:
                ssh_source = SSH(user, host, port=port, key=key, compress=compress)
            except (FileNotFoundError, SSHException):
                continue
            source_name_log = '{:s}@{:s}:{:s}'.format(user, host, source_name)
        else:
            ssh_source = None
            source_name_log = source_name

        try:
            # Children includes the base filesystem (named 'source_name')
            source_children = zfs.find(path=source_name, types=['filesystem', 'volume'], ssh=ssh_source)
        except DatasetNotFoundError:
            logger.error('Source {:s} does not exist...'.format(source_name_log))
            continue
        except ValueError as err:
            logger.error(err)
            continue
        except CalledProcessError as err:
            logger.error('Error while opening source {:s}: \'{:s}\'...'
                         .format(source_name_log, err.stderr.rstrip()))
            continue

        # Send to every backup destination
        for backup_dest in conf['dest']:
            # per-dest options are consumed front-to-back, one list entry per dest
            exclude = conf['exclude'].pop(0) if conf.get('exclude', None) else []
            # check if raw send was requested
            raw = conf['raw_send'].pop(0) if conf.get('raw_send', None) else False
            # check if we need to retry
            retries = conf['retries'].pop(0) if conf.get('retries', None) else 0
            retry_interval = conf['retry_interval'].pop(0) if conf.get('retry_interval', None) else 10
            # check if resumable send was requested
            resume = conf['resume'].pop(0) if conf.get('resume', None) else False
            # check if we should create dataset if it doesn't exist
            dest_auto_create = conf['dest_auto_create'].pop(0) if conf.get('dest_auto_create', None) else False

            try:
                _type, dest_name, user, host, port = parse_name(backup_dest)
            except ValueError as err:
                logger.error('Could not parse {:s}: {}...'.format(backup_dest, err))
                continue

            # if dest is remote, open ssh connection
            if _type == 'ssh':
                dest_key = conf['dest_keys'].pop(0) if conf.get('dest_keys', None) else None
                # if 'ssh_source' is set, then 'compress' is already set and we use the same
                # compression for both source and dest; if not, take the next entry in config
                if not ssh_source:
                    compress = conf['compress'].pop(0) if conf.get('compress', None) else 'lzop'
                try:
                    ssh_dest = SSH(user, host, port=port, key=dest_key, compress=compress)
                except (FileNotFoundError, SSHException):
                    continue
                dest_name_log = '{:s}@{:s}:{:s}'.format(user, host, dest_name)
            else:
                ssh_dest = None
                dest_name_log = dest_name

            # check if dest exists, optionally creating it
            try:
                zfs.open(dest_name, ssh=ssh_dest)
            except DatasetNotFoundError:
                if dest_auto_create:
                    logger.info('Destination {:s} does not exist, will create it...'.format(dest_name_log))
                    if create_dataset(dest_name, dest_name_log, ssh=ssh_dest):
                        continue
                else:
                    logger.error('Destination {:s} does not exist, manually create it or use "dest-auto-create" option...'
                                 .format(dest_name_log))
                    continue
            except ValueError as err:
                logger.error(err)
                continue
            except CalledProcessError as err:
                logger.error('Error while opening dest {:s}: \'{:s}\'...'
                             .format(dest_name_log, err.stderr.rstrip()))
                continue

            # Match children on source to children on dest. Only the leading occurrence of
            # 'source_name' may be rewritten: str.replace() would also rewrite repeated
            # occurrences (e.g. source 'tank/tank' -> corrupted dest name), so slice off
            # the prefix instead.
            dest_children_names = [dest_name + child.name[len(source_name):]
                                   for child in source_children]
            # Send all children to corresponding children on dest
            for source_fs, dest_child_name in zip(source_children, dest_children_names):
                # exclude filesystems from rules
                if any(fnmatch(source_fs.name, pattern) for pattern in exclude):
                    logger.debug('Matched {} in exclude rules, not sending...'.format(source_fs))
                    continue
                # send not excluded filesystems; rc == 2 means "retryable", retry up to 'retries' times
                for retry in range(1, retries + 2):
                    rc = send_filesystem(source_fs, dest_child_name, ssh_dest=ssh_dest, raw=raw, resume=resume)
                    if rc == 2 and retry <= retries:
                        logger.info('Retrying send in {:d}s (retry {:d} of {:d})...'.format(retry_interval, retry, retries))
                        sleep(retry_interval)
                    else:
                        break

            if ssh_dest:
                ssh_dest.close()

        if ssh_source:
            ssh_source.close()
zfs.create(name, ssh=ssh, force=True) 364 | except CalledProcessError as err: 365 | message = err.stderr.rstrip() 366 | if message == "filesystem successfully created, but it may only be mounted by root": 367 | logger.info('Successfully created {:s}, but cannot mount as non-root...'.format(name_log)) 368 | return 0 369 | else: 370 | logger.info('Error while creating {}: \'{:s}\'...'.format(name_log, message)) 371 | return 1 372 | except Exception as err: 373 | logger.error('Error while creating {:s}: {}...'.format(name_log, err)) 374 | return 1 375 | else: 376 | logger.info('Successfully created {:s}...'.format(name_log)) 377 | return 0 378 | 379 | 380 | # def abort_resume(filesystem): 381 | # """Aborts the resumable receive state (deletes resume token) and logs success/fail 382 | 383 | # Parameters 384 | # ---------- 385 | # filesystem : {ZFSFilesystem} 386 | # Name of the receiving dataset to be aborted 387 | 388 | # Returns 389 | # ------- 390 | # int 391 | # 0 if success, 1 if not 392 | # """ 393 | # logger = logging.getLogger(__name__) 394 | # try: 395 | # filesystem.receive_abort() 396 | # except CalledProcessError as err: 397 | # logger.error('Error while aborting resumable receive state on {}: \'{:s}\'...' 398 | # .format(filesystem, err.stderr.rstrip())) 399 | # return 1 400 | # except Exception as err: 401 | # logger.error('Error while aborting resumable receive state on {}: {}...'.format(filesystem, err)) 402 | # return 1 403 | # else: 404 | # logger.info('Aborted resumable receive state on {:}...'.format(filesystem)) 405 | # return 0 406 | -------------------------------------------------------------------------------- /pyznap/ssh.py: -------------------------------------------------------------------------------- 1 | """ 2 | pyznap.ssh 3 | ~~~~~~~~~~~~~~ 4 | 5 | ssh connection. 6 | 7 | :copyright: (c) 2018-2019 by Yannick Boetzel. 8 | :license: GPLv3, see LICENSE for more details. 
9 | """ 10 | 11 | import os 12 | import logging 13 | import subprocess as sp 14 | import pyznap.utils 15 | from datetime import datetime 16 | from .process import run 17 | 18 | 19 | class SSHException(Exception): 20 | """General ssh exception to be raised if anything fails""" 21 | pass 22 | 23 | 24 | class SSH: 25 | """SSH class. 26 | 27 | Attributes 28 | ------ 29 | logger : {logging.logger} 30 | logger to use 31 | user : {str} 32 | User to use 33 | host : {str} 34 | Host to connect to 35 | key : {str} 36 | Path to keyfile 37 | port : {int} 38 | Port number to connect to 39 | socket : {str} 40 | Path to socket file (used with '-o ControlPath') 41 | cmd : {list of str} 42 | ssh command to use with subprocess 43 | """ 44 | 45 | def __init__(self, user, host, key=None, port=22, compress=None): 46 | """Initializes SSH class. 47 | 48 | Parameters 49 | ---------- 50 | user : {str} 51 | User to use 52 | host : {str} 53 | Host to connect to 54 | key : {str}, optional 55 | Path to keyfile (the default is None, meaning the standard location 56 | '~/.ssh/id_rsa' will be checked) 57 | port : {int}, optional 58 | Port number to connect to (the default is 22) 59 | 60 | Raises 61 | ------ 62 | FileNotFoundError 63 | If keyfile does not exist 64 | SSHException 65 | General exception raised if anything goes wrong during ssh connection 66 | """ 67 | 68 | self.logger = logging.getLogger(__name__) 69 | 70 | self.user = user 71 | self.host = host 72 | self.port = port 73 | self.socket = '/tmp/pyznap_{:s}@{:s}:{:d}_{:s}'.format(self.user, self.host, self.port, 74 | datetime.now().strftime('%Y-%m-%d_%H:%M:%S')) 75 | self.key = key or os.path.expanduser('~/.ssh/id_rsa') 76 | 77 | if not os.path.isfile(self.key): 78 | self.logger.error('{} is not a valid ssh key file...'.format(self.key)) 79 | raise FileNotFoundError(self.key) 80 | 81 | self.cmd = ['ssh', '-i', self.key, '-o', 'ControlMaster=auto', '-o', 'ControlPersist=1m', 82 | '-o', 'ControlPath={:s}'.format(self.socket), '-p', 
str(self.port), 83 | '-o', 'ServerAliveInterval=30', '{:s}@{:s}'.format(self.user, self.host)] 84 | 85 | # setup ControlMaster. Process will hang if we call Popen with stderr=sp.PIPE, see 86 | # https://lists.mindrot.org/pipermail/openssh-unix-dev/2014-January/031976.html 87 | try: 88 | run(['exit'], timeout=10, ssh=self, stderr=sp.DEVNULL) 89 | except (sp.CalledProcessError, sp.TimeoutExpired): 90 | pass 91 | 92 | # check if ssh connection is up 93 | try: 94 | run(['exit'], timeout=5, check=True, stdout=sp.PIPE, stderr=sp.PIPE, ssh=self) 95 | except (sp.CalledProcessError, sp.TimeoutExpired) as err: 96 | message = err.stderr.rstrip().decode() if hasattr(err, 'stderr') else err 97 | 98 | self.logger.error('Error while connecting to {:s}@{:s}: {}...' 99 | .format(self.user, self.host, message)) 100 | self.close() 101 | raise SSHException(message) 102 | 103 | # set up compression 104 | self.compress, self.decompress = self.setup_compression(compress) 105 | # set up mbuffer 106 | self.mbuffer = self.setup_mbuffer() 107 | # set up pv 108 | self.pv = self.setup_pv() 109 | 110 | 111 | def setup_compression(self, _type): 112 | """Checks if compression algo is available on source and dest. 113 | 114 | Parameters 115 | ---------- 116 | _type : {str} 117 | Type of compression to use 118 | 119 | Returns 120 | ------- 121 | tuple(List(str)) 122 | Tuple of compress/decompress commands to use, (None, None) if compression is not available 123 | """ 124 | 125 | if _type == None or _type.lower() == 'none': 126 | return None, None 127 | 128 | # compress/decompress commands of different compression tools 129 | algos = {'gzip': (['gzip', '-3'], ['gzip', '-dc']), 130 | 'lzop': (['lzop'], ['lzop', '-dfc']), 131 | 'bzip2': (['bzip2'], ['bzip2', '-dfc']), 132 | 'pigz': (['pigz'], ['pigz', '-dc']), 133 | 'xz': (['xz'], ['xz', '-d']), 134 | 'lz4': (['lz4'], ['lz4', '-dc'])} 135 | 136 | if _type not in algos: 137 | self.logger.warning('Compression method {:s} not supported. 
Will continue without...'.format(_type)) 138 | return None, None 139 | 140 | from pyznap.utils import exists 141 | # check if compression is available on source and dest 142 | if not exists(_type): 143 | self.logger.warning('{:s} does not exist, continuing without compression...' 144 | .format(_type)) 145 | return None, None 146 | if not exists(_type, ssh=self): 147 | self.logger.warning('{:s} does not exist on {:s}@{:s}, continuing without compression...' 148 | .format(_type, self.user, self.host)) 149 | return None, None 150 | 151 | return algos[_type] 152 | 153 | 154 | def setup_mbuffer(self): 155 | """Checks if mbuffer is available on host 156 | 157 | Returns 158 | ------- 159 | List(str) 160 | mbuffer command to use on host 161 | """ 162 | 163 | from pyznap.utils import exists 164 | 165 | if exists('mbuffer', ssh=self): 166 | return lambda mem: ['mbuffer', '-q', '-s', '128K', '-m', '{:d}M'.format(mem)] 167 | else: 168 | return None 169 | 170 | def setup_pv(self): 171 | """Checks if pv is available on host 172 | 173 | Returns 174 | ------- 175 | List(str) 176 | pv command to use on host 177 | """ 178 | 179 | from pyznap.utils import exists 180 | 181 | if exists('pv', ssh=self): 182 | return lambda size: ['pv', '-f', '-w', '100', '-s', str(size)] 183 | else: 184 | return None 185 | 186 | 187 | def close(self): 188 | """Closes the ssh connection by invoking '-O exit' (deletes socket file)""" 189 | 190 | try: 191 | run(['-O', 'exit'], timeout=5, stderr=sp.PIPE, ssh=self) 192 | except (sp.CalledProcessError, sp.TimeoutExpired): 193 | pass 194 | 195 | 196 | def __del__(self): 197 | self.close() 198 | -------------------------------------------------------------------------------- /pyznap/take.py: -------------------------------------------------------------------------------- 1 | """ 2 | pyznap.take 3 | ~~~~~~~~~~~~~~ 4 | 5 | Take snapshots. 6 | 7 | :copyright: (c) 2018-2019 by Yannick Boetzel. 8 | :license: GPLv3, see LICENSE for more details. 
9 | """ 10 | 11 | import logging 12 | from datetime import datetime, timedelta 13 | from subprocess import CalledProcessError 14 | from .ssh import SSH, SSHException 15 | from .utils import parse_name 16 | import pyznap.pyzfs as zfs 17 | from .process import DatasetBusyError, DatasetNotFoundError, DatasetExistsError 18 | 19 | 20 | def take_snap(filesystem, _type): 21 | """Takes a snapshot of type '_type' 22 | 23 | Parameters 24 | ---------- 25 | filesystem : {ZFSFilesystem} 26 | Filesystem to take snapshot of 27 | _type : {str} 28 | Type of snapshot to take 29 | """ 30 | 31 | logger = logging.getLogger(__name__) 32 | now = datetime.now 33 | 34 | snapname = lambda _type: 'pyznap_{:s}_{:s}'.format(now().strftime('%Y-%m-%d_%H:%M:%S'), _type) 35 | 36 | logger.info('Taking snapshot {}@{:s}...'.format(filesystem, snapname(_type))) 37 | try: 38 | filesystem.snapshot(snapname=snapname(_type), recursive=True) 39 | except (DatasetBusyError, DatasetExistsError) as err: 40 | logger.error(err) 41 | except CalledProcessError as err: 42 | logger.error('Error while taking snapshot {}@{:s}: \'{:s}\'...' 43 | .format(filesystem, snapname(_type), err.stderr.rstrip())) 44 | except KeyboardInterrupt: 45 | logger.error('KeyboardInterrupt while taking snapshot {}@{:s}...' 46 | .format(filesystem, snapname(_type))) 47 | raise 48 | 49 | 50 | def take_filesystem(filesystem, conf): 51 | """Takes snapshots of a single filesystem according to conf. 
def take_filesystem(filesystem, conf):
    """Takes snapshots of a single filesystem according to conf.

    Parameters:
    ----------
    filesystem : {ZFSFilesystem}
        Filesystem to take snapshot of
    conf : {dict}
        Config entry with snapshot strategy
    """

    logger = logging.getLogger(__name__)
    logger.debug('Taking snapshots on {}...'.format(filesystem))
    now = datetime.now

    snapshots = {key: [] for key in
                 ('frequent', 'hourly', 'daily', 'weekly', 'monthly', 'yearly')}
    # catch exception if dataset was destroyed since pyznap was started
    try:
        fs_snapshots = filesystem.snapshots()
    except (DatasetNotFoundError, DatasetBusyError) as err:
        logger.error('Error while opening {}: {}...'.format(filesystem, err))
        return 1

    # categorize snapshots by type, ignoring anything not taken by pyznap or sanoid
    for snap in fs_snapshots:
        if not snap.name.split('@')[1].startswith(('pyznap', 'autosnap')):
            continue
        try:
            _date, _time, snap_type = snap.name.split('_')[-3:]
            snap_time = datetime.strptime('{:s}_{:s}'.format(_date, _time), '%Y-%m-%d_%H:%M:%S')
            snapshots[snap_type].append((snap, snap_time))
        except (ValueError, KeyError):
            # malformed timestamp or unknown snapshot type
            continue

    # newest snapshot first
    for taken in snapshots.values():
        taken.reverse()

    # For each type: take a new snapshot if none exists yet, if the newest one belongs
    # to a previous period, or if the newest one is older than the period length.
    if conf['yearly']:
        latest = snapshots['yearly'][0][1] if snapshots['yearly'] else None
        if latest is None or latest.year != now().year:
            take_snap(filesystem, 'yearly')

    if conf['monthly']:
        latest = snapshots['monthly'][0][1] if snapshots['monthly'] else None
        if latest is None or latest.month != now().month or now() - latest > timedelta(days=31):
            take_snap(filesystem, 'monthly')

    if conf['weekly']:
        latest = snapshots['weekly'][0][1] if snapshots['weekly'] else None
        if latest is None or latest.isocalendar()[1] != now().isocalendar()[1] or now() - latest > timedelta(days=7):
            take_snap(filesystem, 'weekly')

    if conf['daily']:
        latest = snapshots['daily'][0][1] if snapshots['daily'] else None
        if latest is None or latest.day != now().day or now() - latest > timedelta(days=1):
            take_snap(filesystem, 'daily')

    if conf['hourly']:
        latest = snapshots['hourly'][0][1] if snapshots['hourly'] else None
        if latest is None or latest.hour != now().hour or now() - latest > timedelta(hours=1):
            take_snap(filesystem, 'hourly')

    if conf['frequent']:
        latest = snapshots['frequent'][0][1] if snapshots['frequent'] else None
        if latest is None or latest.minute != now().minute or now() - latest > timedelta(minutes=1):
            take_snap(filesystem, 'frequent')
162 | .format(name_log, err.stderr.rstrip())) 163 | continue 164 | else: 165 | # Take recursive snapshot of parent filesystem 166 | take_filesystem(children[0], conf) 167 | # Take snapshot of all children that don't have all snapshots yet 168 | for child in children[1:]: 169 | take_filesystem(child, conf) 170 | finally: 171 | if ssh: 172 | ssh.close() 173 | -------------------------------------------------------------------------------- /pyznap/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | pyznap.utils 3 | ~~~~~~~~~~~~~~ 4 | 5 | Helper functions. 6 | 7 | :copyright: (c) 2018-2019 by Yannick Boetzel. 8 | :license: GPLv3, see LICENSE for more details. 9 | """ 10 | 11 | import os 12 | import re 13 | import logging 14 | from subprocess import Popen, PIPE, TimeoutExpired, CalledProcessError 15 | from .process import run 16 | from .ssh import SSHException 17 | 18 | from datetime import datetime 19 | from configparser import (ConfigParser, NoOptionError, MissingSectionHeaderError, 20 | DuplicateSectionError, DuplicateOptionError) 21 | from pkg_resources import resource_string 22 | 23 | 24 | def exists(executable='', ssh=None): 25 | """Tests if an executable exists on the system. 26 | 27 | Parameters: 28 | ---------- 29 | executable : {str}, optional 30 | Name of the executable to test (the default is an empty string) 31 | ssh : {SSH}, optional 32 | Open ssh connection (the default is None, which means check is done locally) 33 | 34 | Returns 35 | ------- 36 | bool 37 | True if executable exists, False if not 38 | """ 39 | 40 | logger = logging.getLogger(__name__) 41 | name_log = '{:s}@{:s}'.format(ssh.user, ssh.host) if ssh else 'localhost' 42 | 43 | cmd = ['which', executable] 44 | try: 45 | retcode = run(cmd, stdout=PIPE, stderr=PIPE, timeout=5, universal_newlines=True, ssh=ssh).returncode 46 | except (TimeoutExpired, SSHException) as err: 47 | logger.error('Error while checking if {:s} exists on {:s}: \'{}\'...' 
def read_config(path):
    """Reads a config file and outputs a list of dicts with the given snapshot strategy.

    Parameters:
    ----------
    path : {str}
        Path to the config file

    Returns
    -------
    list of dict
        Full config list containing all strategies for different filesystems,
        or None if the file is missing or malformed
    """

    logger = logging.getLogger(__name__)

    if not os.path.isfile(path):
        logger.error('Error while loading config: File {:s} does not exist.'.format(path))
        return None

    parser = ConfigParser()
    try:
        parser.read(path)
    except (MissingSectionHeaderError, DuplicateSectionError, DuplicateOptionError) as e:
        logger.error('Error while loading config: {}'.format(e))
        return None

    bool_map = {'yes': True, 'no': False}
    int_options = ('frequent', 'hourly', 'daily', 'weekly', 'monthly', 'yearly')
    options = ['key', 'frequent', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'snap', 'clean',
               'dest', 'dest_keys', 'compress', 'exclude', 'raw_send', 'resume', 'dest_auto_create',
               'retries', 'retry_interval']

    config = []
    for section in parser.sections():
        entry = {'name': section}
        config.append(entry)

        for option in options:
            try:
                value = parser.get(section, option)
            except NoOptionError:
                entry[option] = None
                continue

            if option == 'key':
                # key paths that don't exist are dropped, not kept as dead strings
                entry[option] = value if os.path.isfile(value) else None
            elif option in int_options:
                entry[option] = int(value)
            elif option in ('snap', 'clean'):
                entry[option] = bool_map.get(value.lower(), None)
            elif option in ('dest', 'compress'):
                entry[option] = [item.strip() for item in value.split(',')]
            elif option == 'dest_keys':
                entry[option] = [item.strip() if os.path.isfile(item.strip()) else None
                                 for item in value.split(',')]
            elif option == 'exclude':
                # one space-separated pattern list per dest, comma separated between dests
                entry[option] = [[item.strip() for item in part.strip().split(' ')]
                                 if part.strip() else None
                                 for part in value.split(',')]
            elif option in ('raw_send', 'resume', 'dest_auto_create'):
                entry[option] = [bool_map.get(item.strip().lower(), None)
                                 for item in value.split(',')]
            elif option in ('retries', 'retry_interval'):
                entry[option] = [int(item) for item in value.split(',')]

    # Child filesystems inherit the snapshot strategy of their parent entry
    inherited = ('key', 'frequent', 'hourly', 'daily', 'weekly', 'monthly', 'yearly',
                 'snap', 'clean')
    for parent in config:
        for child in config:
            if parent is child:
                continue
            child_parent = '/'.join(child['name'].split('/')[:-1])  # parent path of child fs
            if child_parent.startswith(parent['name']):
                for option in inherited:
                    if child[option] is None:
                        child[option] = parent[option]

    # Sort by pathname so parents come before their children
    return sorted(config, key=lambda entry: entry['name'].split('/'))
139 | 140 | Parameters: 141 | ---------- 142 | value : {str} 143 | String to split up 144 | 145 | Returns 146 | ------- 147 | (str, str, str, str, int) 148 | Tuple containing the different parts of the string 149 | """ 150 | 151 | if value.startswith('ssh'): 152 | _type, port, host, fsname = value.split(':', maxsplit=3) 153 | port = int(port) if port else 22 154 | user, host = host.split('@', maxsplit=1) 155 | else: 156 | _type, user, host, port = 'local', None, None, None 157 | fsname = value 158 | return _type, fsname, user, host, port 159 | 160 | 161 | def create_config(path): 162 | """Initial configuration: Creates dir 'path' and puts sample config there 163 | 164 | Parameters 165 | ---------- 166 | path : str 167 | Path to dir where to store config file 168 | 169 | """ 170 | 171 | logger = logging.getLogger(__name__) 172 | 173 | CONFIG_FILE = os.path.join(path, 'pyznap.conf') 174 | config = resource_string(__name__, 'config/pyznap.conf').decode("utf-8") 175 | 176 | logger.info('Initial setup...') 177 | 178 | if not os.path.isdir(path): 179 | logger.info('Creating directory {:s}...'.format(path)) 180 | try: 181 | os.mkdir(path, mode=int('755', base=8)) 182 | except (PermissionError, FileNotFoundError, OSError) as e: 183 | logger.error('Could not create {:s}: {}'.format(path, e)) 184 | logger.error('Aborting setup...') 185 | return 1 186 | else: 187 | logger.info('Directory {:s} does already exist...'.format(path)) 188 | 189 | if not os.path.isfile(CONFIG_FILE): 190 | logger.info('Creating sample config {:s}...'.format(CONFIG_FILE)) 191 | try: 192 | with open(CONFIG_FILE, 'w') as file: 193 | file.write(config) 194 | except (PermissionError, FileNotFoundError, IOError, OSError) as e: 195 | logger.error('Could not write to file {:s}: {}'.format(CONFIG_FILE, e)) 196 | else: 197 | try: 198 | os.chmod(CONFIG_FILE, mode=int('644', base=8)) 199 | except (PermissionError, IOError, OSError) as e: 200 | logger.error('Could not set correct permissions on file {:s}. 
Please do so manually...' 201 | .format(CONFIG_FILE)) 202 | else: 203 | logger.info('File {:s} does already exist...'.format(CONFIG_FILE)) 204 | 205 | return 0 206 | 207 | 208 | def check_recv(fsname, ssh=None): 209 | """Checks if there is already a 'zfs receive' for that dataset ongoing 210 | 211 | Parameters 212 | ---------- 213 | fsname : str 214 | Name of the dataset 215 | ssh : SSH, optional 216 | Open ssh connection (the default is None, which means check is done locally) 217 | 218 | Returns 219 | ------- 220 | bool 221 | True if there is a 'zfs receive' ongoing or if an error is raised during checking. False if 222 | there is no 'zfs receive'. 223 | """ 224 | 225 | logger = logging.getLogger(__name__) 226 | fsname_log = '{:s}@{:s}:{:s}'.format(ssh.user, ssh.host, fsname) if ssh else fsname 227 | 228 | try: 229 | out = run(['ps', '-Ao', 'args='], stdout=PIPE, stderr=PIPE, timeout=5, 230 | universal_newlines=True, ssh=ssh).stdout 231 | except (TimeoutExpired, SSHException) as err: 232 | logger.error('Error while checking \'zfs receive\' on {:s}: \'{}\'...' 233 | .format(fsname_log, err)) 234 | return True 235 | except CalledProcessError as err: 236 | logger.error('Error while checking \'zfs receive\' on {:s}: \'{:s}\'...' 237 | .format(fsname_log, err.stderr.rstrip())) 238 | return True 239 | else: 240 | match = re.search(r'zfs (receive|recv).*({:s})(?=\n)'.format(fsname), out) 241 | if match: 242 | logger.error('Cannot send to {:s}, process \'{:s}\' already running...' 
243 | .format(fsname_log, match.group())) 244 | return True 245 | 246 | return False 247 | 248 | 249 | def bytes_fmt(num): 250 | """Converts bytes to a human readable format 251 | 252 | Parameters 253 | ---------- 254 | num : int,float 255 | Number of bytes 256 | 257 | Returns 258 | ------- 259 | float 260 | Human readable format with binary prefixes 261 | """ 262 | 263 | for x in ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: 264 | if num < 1024: 265 | return "{:3.1f}{:s}".format(num, x) 266 | num /= 1024 267 | else: 268 | return "{:3.1f}{:s}".format(num, 'Y') 269 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | 4 | [tool:pytest] 5 | addopts = -v -m 'not slow' 6 | testpaths = tests -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | setup 4 | ~~~~~~~~~~~~~~ 5 | 6 | pyznap installation using setuptools. 7 | 8 | :copyright: (c) 2018-2019 by Yannick Boetzel. 9 | :license: GPLv3, see LICENSE for more details. 
10 | """ 11 | 12 | import os 13 | import re 14 | from setuptools import setup 15 | 16 | 17 | DIRNAME = os.path.dirname(os.path.abspath(__file__)) 18 | 19 | with open(os.path.join(DIRNAME, 'README.md'), 'r') as file: 20 | readme = file.read() 21 | 22 | with open(os.path.join(DIRNAME, 'pyznap/__init__.py'), 'r') as file: 23 | version = re.search(r'__version__ = \'(.*?)\'', file.read()).group(1) 24 | 25 | setup( 26 | name='pyznap', 27 | version=version, 28 | description='ZFS snapshot tool written in Python', 29 | long_description=readme, 30 | long_description_content_type="text/markdown", 31 | keywords='zfs snapshot backup', 32 | url='https://github.com/yboetz/pyznap', 33 | author='Yannick Boetzel', 34 | author_email='github@boetzel.ch', 35 | license='GPLv3', 36 | packages=['pyznap'], 37 | include_package_data=True, 38 | python_requires='>=3.5', 39 | extras_require={ 40 | 'dev': [ 41 | 'pytest', 42 | 'pytest-dependency', 43 | 'pytest-runner', 44 | 'paramiko>=2.4.2', 45 | ] 46 | }, 47 | classifiers=[ 48 | 'Development Status :: 5 - Production/Stable', 49 | 'Environment :: Console', 50 | 'Intended Audience :: End Users/Desktop', 51 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 52 | 'Operating System :: Unix', 53 | 'Programming Language :: Python', 54 | 'Programming Language :: Python :: 3', 55 | 'Programming Language :: Python :: 3.5', 56 | 'Programming Language :: Python :: 3.6', 57 | 'Programming Language :: Python :: 3.7', 58 | 'Topic :: System :: Archiving :: Backup', 59 | 'Topic :: System :: Filesystems', 60 | ], 61 | entry_points = { 62 | 'console_scripts': ['pyznap=pyznap.main:main'], 63 | }, 64 | zip_safe=False 65 | ) 66 | -------------------------------------------------------------------------------- /tests/test_functions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env pytest -v 2 | """ 3 | pyznap.test_functions 4 | ~~~~~~~~~~~~~~ 5 | 6 | Tests for pyznap functions. 
7 | 8 | :copyright: (c) 2018-2019 by Yannick Boetzel. 9 | :license: GPLv3, see LICENSE for more details. 10 | """ 11 | 12 | import subprocess as sp 13 | import sys 14 | import os 15 | import logging 16 | import random 17 | import string 18 | import fnmatch 19 | from tempfile import NamedTemporaryFile 20 | from datetime import datetime 21 | import pytest 22 | 23 | import pyznap.pyzfs as zfs 24 | from pyznap.utils import read_config, parse_name 25 | from pyznap.clean import clean_config 26 | from pyznap.take import take_config 27 | from pyznap.send import send_config 28 | from pyznap.process import DatasetNotFoundError 29 | 30 | 31 | logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', 32 | datefmt='%b %d %H:%M:%S') 33 | logger = logging.getLogger(__name__) 34 | 35 | def randomword(length): 36 | letters = string.ascii_lowercase 37 | return ''.join(random.choice(letters) for i in range(length)) 38 | 39 | @pytest.fixture(scope='module') 40 | def zpools(): 41 | """Creates two temporary zpools to be called from test functions. 
Yields the two pool names 42 | and destroys them after testing.""" 43 | 44 | zpool = '/sbin/zpool' 45 | _word = randomword(8) 46 | pool0 = 'pyznap_source_' + _word 47 | pool1 = 'pyznap_dest_' + _word 48 | 49 | # Create temporary files on which the zpools are created 50 | with NamedTemporaryFile() as file0, NamedTemporaryFile() as file1: 51 | filename0 = file0.name 52 | filename1 = file1.name 53 | 54 | # Fix size to 100Mb 55 | file0.seek(100*1024**2-1) 56 | file0.write(b'0') 57 | file0.seek(0) 58 | file1.seek(100*1024**2-1) 59 | file1.write(b'0') 60 | file1.seek(0) 61 | 62 | # Create temporary test pools 63 | for pool, filename in zip([pool0, pool1], [filename0, filename1]): 64 | try: 65 | sp.check_call([zpool, 'create', pool, filename]) 66 | except sp.CalledProcessError as err: 67 | logger.error(err) 68 | return 69 | 70 | try: 71 | fs0 = zfs.open(pool0) 72 | fs1 = zfs.open(pool1) 73 | assert fs0.name == pool0 74 | assert fs1.name == pool1 75 | except (DatasetNotFoundError, AssertionError, Exception) as err: 76 | logger.error(err) 77 | else: 78 | yield fs0, fs1 79 | 80 | # Destroy temporary test pools 81 | for pool in [pool0, pool1]: 82 | try: 83 | sp.check_call([zpool, 'destroy', pool]) 84 | except sp.CalledProcessError as err: 85 | logger.error(err) 86 | 87 | 88 | class TestUtils(object): 89 | def test_read_config(self): 90 | with NamedTemporaryFile('w') as file: 91 | name = file.name 92 | file.write('[rpool/data]\n') 93 | file.write('hourly = 12\n') 94 | file.write('monthly = 0\n') 95 | file.write('clean = no\n') 96 | file.write('dest = backup/data, tank/data, rpool/data\n') 97 | file.write('compress = lzop, pigz, gzip\n\n') 98 | 99 | file.write('[rpool]\n') 100 | file.write('frequent = 4\n') 101 | file.write('hourly = 24\n') 102 | file.write('daily = 7\n') 103 | file.write('weekly = 4\n') 104 | file.write('monthly = 12\n') 105 | file.write('yearly = 2\n') 106 | file.write('snap = yes\n') 107 | file.write('clean = yes\n') 108 | file.write('dest = backup, 
tank\n\n') 109 | 110 | file.write('[rpool/data_2]\n') 111 | file.write('daily = 14\n') 112 | file.write('yearly = 0\n') 113 | file.write('clean = yes\n\n') 114 | 115 | file.write('[tank]\n') 116 | file.write('dest = backup/tank, rpool/tank, data/tank, zpool/tank\n') 117 | file.write('exclude = , tank/media/* tank/data* tank/home/*, tank/media* tank/home*\n') 118 | file.seek(0) 119 | 120 | config = read_config(name) 121 | conf0, conf1, conf2, conf3 = config 122 | 123 | assert conf0['name'] == 'rpool' 124 | assert conf0['key'] == None 125 | assert conf0['frequent'] == 4 126 | assert conf0['hourly'] == 24 127 | assert conf0['daily'] == 7 128 | assert conf0['weekly'] == 4 129 | assert conf0['monthly'] == 12 130 | assert conf0['yearly'] == 2 131 | assert conf0['snap'] == True 132 | assert conf0['clean'] == True 133 | assert conf0['dest'] == ['backup', 'tank'] 134 | assert conf0['dest_keys'] == None 135 | 136 | assert conf1['name'] == 'rpool/data' 137 | assert conf1['key'] == None 138 | assert conf1['frequent'] == 4 139 | assert conf1['hourly'] == 12 140 | assert conf1['daily'] == 7 141 | assert conf1['weekly'] == 4 142 | assert conf1['monthly'] == 0 143 | assert conf1['yearly'] == 2 144 | assert conf1['snap'] == True 145 | assert conf1['clean'] == False 146 | assert conf1['dest'] == ['backup/data', 'tank/data', 'rpool/data'] 147 | assert conf1['dest_keys'] == None 148 | assert conf1['compress'] == ['lzop', 'pigz', 'gzip'] 149 | 150 | assert conf2['name'] == 'rpool/data_2' 151 | assert conf2['key'] == None 152 | assert conf2['frequent'] == 4 153 | assert conf2['hourly'] == 24 154 | assert conf2['daily'] == 14 155 | assert conf2['weekly'] == 4 156 | assert conf2['monthly'] == 12 157 | assert conf2['yearly'] == 0 158 | assert conf2['snap'] == True 159 | assert conf2['clean'] == True 160 | assert conf2['dest'] == None 161 | assert conf2['dest_keys'] == None 162 | 163 | assert conf3['name'] == 'tank' 164 | assert conf3['dest'] == ['backup/tank', 'rpool/tank', 'data/tank', 
'zpool/tank'] 165 | assert conf3['exclude'] == [None, ['tank/media/*', 'tank/data*', 'tank/home/*'], ['tank/media*', 'tank/home*']] 166 | 167 | 168 | 169 | def test_parse_name(self): 170 | _type, fsname, user, host, port = parse_name('ssh:23:user@hostname:rpool/data') 171 | assert _type == 'ssh' 172 | assert fsname == 'rpool/data' 173 | assert user == 'user' 174 | assert host == 'hostname' 175 | assert port == 23 176 | 177 | _type, fsname, user, host, port = parse_name('rpool/data') 178 | assert _type == 'local' 179 | assert fsname == 'rpool/data' 180 | assert user == None 181 | assert host == None 182 | assert port == None 183 | 184 | 185 | class TestSnapshot(object): 186 | @pytest.mark.dependency() 187 | def test_take_snapshot(self, zpools): 188 | fs, _ = zpools 189 | config = [{'name': fs.name, 'frequent': 1, 'hourly': 1, 'daily': 1, 'weekly': 1, 190 | 'monthly': 1, 'yearly': 1, 'snap': True}] 191 | take_config(config) 192 | take_config(config) 193 | 194 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 195 | for snap in fs.snapshots(): 196 | snap_type = snap.name.split('_')[-1] 197 | snapshots[snap_type].append(snap) 198 | 199 | for snap_type, snaps in snapshots.items(): 200 | assert len(snaps) == 1 201 | 202 | 203 | @pytest.mark.dependency(depends=['TestSnapshot::test_take_snapshot']) 204 | def test_clean_snapshot(self, zpools): 205 | fs, _ = zpools 206 | config = [{'name': fs.name, 'frequent': 0, 'hourly': 0, 'daily': 0, 'weekly': 0, 207 | 'monthly': 0, 'yearly': 0, 'clean': True}] 208 | clean_config(config) 209 | 210 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 211 | for snap in fs.snapshots(): 212 | snap_type = snap.name.split('_')[-1] 213 | snapshots[snap_type].append(snap) 214 | 215 | for snap_type, snaps in snapshots.items(): 216 | assert len(snaps) == config[0][snap_type] 217 | 218 | 219 | 
@pytest.mark.dependency(depends=['TestSnapshot::test_clean_snapshot']) 220 | def test_take_snapshot_recursive(self, zpools): 221 | fs, _ = zpools 222 | fs.destroy(force=True) 223 | config = [{'name': fs.name, 'frequent': 1, 'hourly': 1, 'daily': 1, 'weekly': 1, 224 | 'monthly': 1, 'yearly': 1, 'snap': True}] 225 | take_config(config) 226 | fs.snapshots()[-1].destroy(force=True) 227 | fs.snapshots()[-1].destroy(force=True) 228 | 229 | sub1 = zfs.create('{:s}/sub1'.format(fs.name)) 230 | abc = zfs.create('{:s}/sub1/abc'.format(fs.name)) 231 | sub1_abc = zfs.create('{:s}/sub1_abc'.format(fs.name)) 232 | config += [{'name': '{}/sub1'.format(fs), 'frequent': 1, 'hourly': 1, 'daily': 1, 'weekly': 1, 233 | 'monthly': 1, 'yearly': 1, 'snap': False}] 234 | take_config(config) 235 | 236 | # Check fs 237 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 238 | for snap in fs.snapshots(): 239 | snap_type = snap.name.split('_')[-1] 240 | snapshots[snap_type].append(snap) 241 | 242 | for snap_type, snaps in snapshots.items(): 243 | assert len(snaps) == config[0][snap_type] 244 | 245 | # Check sub1 246 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 247 | for snap in sub1.snapshots(): 248 | snap_type = snap.name.split('_')[-1] 249 | snapshots[snap_type].append(snap) 250 | 251 | for snap_type, snaps in snapshots.items(): 252 | assert len(snaps) == config[0][snap_type] 253 | 254 | # Check abc 255 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 256 | for snap in abc.snapshots(): 257 | snap_type = snap.name.split('_')[-1] 258 | snapshots[snap_type].append(snap) 259 | 260 | for snap_type, snaps in snapshots.items(): 261 | assert len(snaps) == config[0][snap_type] 262 | 263 | # Check sub1_abc 264 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 265 | for snap in 
sub1_abc.snapshots(): 266 | snap_type = snap.name.split('_')[-1] 267 | snapshots[snap_type].append(snap) 268 | 269 | for snap_type, snaps in snapshots.items(): 270 | assert len(snaps) == config[0][snap_type] 271 | 272 | 273 | @pytest.mark.dependency(depends=['TestSnapshot::test_take_snapshot_recursive']) 274 | def test_clean_recursive(self, zpools): 275 | fs, _ = zpools 276 | fs.destroy(force=True) 277 | sub1 = zfs.create('{:s}/sub1'.format(fs.name)) 278 | abc = zfs.create('{:s}/sub1/abc'.format(fs.name)) 279 | abc_efg = zfs.create('{:s}/sub1/abc_efg'.format(fs.name)) 280 | sub2 = zfs.create('{:s}/sub2'.format(fs.name)) 281 | efg = zfs.create('{:s}/sub2/efg'.format(fs.name)) 282 | hij = zfs.create('{:s}/sub2/efg/hij'.format(fs.name)) 283 | klm = zfs.create('{:s}/sub2/efg/hij/klm'.format(fs.name)) 284 | sub3 = zfs.create('{:s}/sub3'.format(fs.name)) 285 | 286 | config = [{'name': fs.name, 'frequent': 1, 'hourly': 1, 'daily': 1, 'weekly': 1, 287 | 'monthly': 1, 'yearly': 1, 'snap': True}] 288 | take_config(config) 289 | 290 | config = [{'name': fs.name, 'frequent': 1, 'hourly': 0, 'daily': 1, 'weekly': 0, 291 | 'monthly': 0, 'yearly': 0, 'clean': True}, 292 | {'name': '{}/sub2'.format(fs), 'frequent': 0, 'hourly': 1, 'daily': 0, 293 | 'weekly': 1, 'monthly': 0, 'yearly': 1, 'clean': True}, 294 | {'name': '{}/sub3'.format(fs), 'frequent': 1, 'hourly': 0, 'daily': 1, 295 | 'weekly': 0, 'monthly': 1, 'yearly': 0, 'clean': False}, 296 | {'name': '{}/sub1/abc'.format(fs), 'frequent': 0, 'hourly': 0, 'daily': 0, 297 | 'weekly': 1, 'monthly': 1, 'yearly': 1, 'clean': True}, 298 | {'name': '{}/sub2/efg/hij'.format(fs), 'frequent': 0, 'hourly': 0, 299 | 'daily': 0, 'weekly': 0, 'monthly': 0, 'yearly': 0, 'clean': True}] 300 | clean_config(config) 301 | 302 | # Check parent filesystem 303 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 304 | for snap in fs.snapshots(): 305 | snap_type = snap.name.split('_')[-1] 306 | 
snapshots[snap_type].append(snap) 307 | 308 | for snap_type, snaps in snapshots.items(): 309 | assert len(snaps) == config[0][snap_type] 310 | # Check sub1 311 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 312 | for snap in sub1.snapshots(): 313 | snap_type = snap.name.split('_')[-1] 314 | snapshots[snap_type].append(snap) 315 | 316 | for snap_type, snaps in snapshots.items(): 317 | assert len(snaps) == config[0][snap_type] 318 | # Check sub1/abc 319 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 320 | for snap in abc.snapshots(): 321 | snap_type = snap.name.split('_')[-1] 322 | snapshots[snap_type].append(snap) 323 | 324 | for snap_type, snaps in snapshots.items(): 325 | assert len(snaps) == config[3][snap_type] 326 | # Check sub1/abc_efg 327 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 328 | for snap in abc_efg.snapshots(): 329 | snap_type = snap.name.split('_')[-1] 330 | snapshots[snap_type].append(snap) 331 | 332 | for snap_type, snaps in snapshots.items(): 333 | assert len(snaps) == config[0][snap_type] 334 | # Check sub2 335 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 336 | for snap in sub2.snapshots(): 337 | snap_type = snap.name.split('_')[-1] 338 | snapshots[snap_type].append(snap) 339 | 340 | for snap_type, snaps in snapshots.items(): 341 | assert len(snaps) == config[1][snap_type] 342 | # Check sub2/efg 343 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 344 | for snap in efg.snapshots(): 345 | snap_type = snap.name.split('_')[-1] 346 | snapshots[snap_type].append(snap) 347 | 348 | for snap_type, snaps in snapshots.items(): 349 | assert len(snaps) == config[1][snap_type] 350 | # Check sub2/efg/hij 351 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': 
[], 'yearly': []} 352 | for snap in hij.snapshots(): 353 | snap_type = snap.name.split('_')[-1] 354 | snapshots[snap_type].append(snap) 355 | 356 | for snap_type, snaps in snapshots.items(): 357 | assert len(snaps) == config[4][snap_type] 358 | # Check sub2/efg/hij/klm 359 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 360 | for snap in klm.snapshots(): 361 | snap_type = snap.name.split('_')[-1] 362 | snapshots[snap_type].append(snap) 363 | 364 | for snap_type, snaps in snapshots.items(): 365 | assert len(snaps) == config[4][snap_type] 366 | # Check sub3 367 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 368 | for snap in sub3.snapshots(): 369 | snap_type = snap.name.split('_')[-1] 370 | snapshots[snap_type].append(snap) 371 | 372 | for snap_type, snaps in snapshots.items(): 373 | assert len(snaps) == 1 374 | 375 | 376 | class TestSending(object): 377 | @pytest.mark.dependency() 378 | def test_send_full(self, zpools): 379 | """Checks if send_snap totally replicates a filesystem""" 380 | fs0, fs1 = zpools 381 | fs0.destroy(force=True) 382 | fs1.destroy(force=True) 383 | config = [{'name': fs0.name, 'dest': [fs1.name]}] 384 | 385 | fs0.snapshot('snap0') 386 | zfs.create('{:s}/sub1'.format(fs0.name)) 387 | fs0.snapshot('snap1', recursive=True) 388 | zfs.create('{:s}/sub2'.format(fs0.name)) 389 | fs0.snapshot('snap2', recursive=True) 390 | zfs.create('{:s}/sub3'.format(fs0.name)) 391 | fs0.snapshot('snap3', recursive=True) 392 | fs0.snapshot('snap4', recursive=True) 393 | fs0.snapshot('snap5', recursive=True) 394 | zfs.create('{:s}/sub3/abc'.format(fs0.name)) 395 | fs0.snapshot('snap6', recursive=True) 396 | zfs.create('{:s}/sub3/abc_abc'.format(fs0.name)) 397 | fs0.snapshot('snap7', recursive=True) 398 | zfs.create('{:s}/sub3/efg'.format(fs0.name)) 399 | fs0.snapshot('snap8', recursive=True) 400 | fs0.snapshot('snap9', recursive=True) 401 | send_config(config) 
402 | 403 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 404 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 405 | assert set(fs0_children) == set(fs1_children) 406 | 407 | 408 | @pytest.mark.dependency(depends=['TestSending::test_send_full']) 409 | def test_send_incremental(self, zpools): 410 | fs0, fs1 = zpools 411 | fs0.destroy(force=True) 412 | fs1.destroy(force=True) 413 | config = [{'name': fs0.name, 'dest': [fs1.name]}] 414 | 415 | fs0.snapshot('snap0', recursive=True) 416 | zfs.create('{:s}/sub1'.format(fs0.name)) 417 | fs0.snapshot('snap1', recursive=True) 418 | send_config(config) 419 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 420 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 421 | assert set(fs0_children) == set(fs1_children) 422 | 423 | zfs.create('{:s}/sub2'.format(fs0.name)) 424 | fs0.snapshot('snap2', recursive=True) 425 | send_config(config) 426 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 427 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 428 | assert set(fs0_children) == set(fs1_children) 429 | 430 | zfs.create('{:s}/sub3'.format(fs0.name)) 431 | fs0.snapshot('snap3', recursive=True) 432 | send_config(config) 433 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 434 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 435 | assert set(fs0_children) == set(fs1_children) 436 | 437 | 438 | @pytest.mark.dependency(depends=['TestSending::test_send_incremental']) 439 | def test_send_delete_snapshot(self, zpools): 440 | fs0, fs1 = zpools 441 | config = [{'name': fs0.name, 'dest': [fs1.name]}] 442 | 443 | # Delete 
recent snapshots on dest 444 | fs1.snapshots()[-1].destroy(force=True) 445 | fs1.snapshots()[-1].destroy(force=True) 446 | send_config(config) 447 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 448 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 449 | assert set(fs0_children) == set(fs1_children) 450 | 451 | # Delete recent snapshot on source 452 | fs0.snapshot('snap4', recursive=True) 453 | send_config(config) 454 | fs0.snapshots()[-1].destroy(force=True) 455 | fs0.snapshot('snap5', recursive=True) 456 | send_config(config) 457 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 458 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 459 | assert set(fs0_children) == set(fs1_children) 460 | 461 | 462 | @pytest.mark.dependency(depends=['TestSending::test_send_delete_snapshot']) 463 | def test_send_delete_sub(self, zpools): 464 | fs0, fs1 = zpools 465 | config = [{'name': fs0.name, 'dest': [fs1.name]}] 466 | 467 | # Delete subfilesystems 468 | sub3 = fs1.filesystems()[-1] 469 | sub3.destroy(force=True) 470 | fs0.snapshot('snap6', recursive=True) 471 | sub2 = fs1.filesystems()[-1] 472 | sub2.destroy(force=True) 473 | send_config(config) 474 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 475 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 476 | assert set(fs0_children) == set(fs1_children) 477 | 478 | 479 | @pytest.mark.dependency(depends=['TestSending::test_send_delete_sub']) 480 | def test_send_delete_old(self, zpools): 481 | fs0, fs1 = zpools 482 | config = [{'name': fs0.name, 'dest': [fs1.name]}] 483 | 484 | # Delete old snapshot on source 485 | fs0.snapshots()[0].destroy(force=True) 486 | fs0.snapshot('snap7', recursive=True) 487 | 
send_config(config) 488 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 489 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 490 | assert not (set(fs0_children) == set(fs1_children)) 491 | # Assert that snap0 was not deleted from fs1 492 | for child in set(fs1_children) - set(fs0_children): 493 | assert child.endswith('snap0') 494 | 495 | @pytest.mark.dependency() 496 | def test_send_exclude(self, zpools): 497 | """Checks if exclude rules work""" 498 | fs0, fs1 = zpools 499 | fs0.destroy(force=True) 500 | fs1.destroy(force=True) 501 | 502 | exclude = ['*/sub1', '*/sub3/abc', '*/sub3/efg'] 503 | config = [{'name': fs0.name, 'dest': [fs1.name], 'exclude': [exclude]}] 504 | 505 | zfs.create('{:s}/sub1'.format(fs0.name)) 506 | zfs.create('{:s}/sub2'.format(fs0.name)) 507 | zfs.create('{:s}/sub3'.format(fs0.name)) 508 | zfs.create('{:s}/sub3/abc'.format(fs0.name)) 509 | zfs.create('{:s}/sub3/abc_abc'.format(fs0.name)) 510 | zfs.create('{:s}/sub3/efg'.format(fs0.name)) 511 | fs0.snapshot('snap', recursive=True) 512 | send_config(config) 513 | 514 | fs0_children = set([child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]]) 515 | fs1_children = set([child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]]) 516 | # remove unwanted datasets/snapshots 517 | for match in exclude: 518 | fs0_children -= set(fnmatch.filter(fs0_children, match)) 519 | fs0_children -= set(fnmatch.filter(fs0_children, match + '@snap')) 520 | 521 | assert set(fs0_children) == set(fs1_children) 522 | 523 | @pytest.mark.dependency() 524 | def test_send_raw(self, zpools): 525 | """Checks if raw_send works""" 526 | fs0, fs1 = zpools 527 | fs0.destroy(force=True) 528 | fs1.destroy(force=True) 529 | 530 | raw_send = ['yes'] 531 | config = [{'name': fs0.name, 'dest': [fs1.name], 'raw_send': raw_send}] 532 | 533 | 
zfs.create('{:s}/sub1'.format(fs0.name), props={'compression':'gzip'}) 534 | zfs.create('{:s}/sub2'.format(fs0.name), props={'compression':'lz4'}) 535 | zfs.create('{:s}/sub3'.format(fs0.name), props={'compression':'gzip'}) 536 | zfs.create('{:s}/sub3/abc'.format(fs0.name)) 537 | zfs.create('{:s}/sub3/abc_abc'.format(fs0.name)) 538 | zfs.create('{:s}/sub3/efg'.format(fs0.name)) 539 | fs0.snapshot('snap', recursive=True) 540 | send_config(config) 541 | 542 | fs0_children = set([child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]]) 543 | fs1_children = set([child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]]) 544 | 545 | assert set(fs0_children) == set(fs1_children) 546 | -------------------------------------------------------------------------------- /tests/test_functions_ssh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env pytest -v 2 | """ 3 | pyznap.test_functions_ssh 4 | ~~~~~~~~~~~~~~ 5 | 6 | ssh tests for pyznap functions. 7 | 8 | :copyright: (c) 2018-2019 by Yannick Boetzel. 9 | :license: GPLv3, see LICENSE for more details. 
10 | """ 11 | 12 | import subprocess as sp 13 | import sys 14 | import os 15 | import random 16 | import string 17 | import fnmatch 18 | import logging 19 | from tempfile import NamedTemporaryFile 20 | from datetime import datetime 21 | import pytest 22 | 23 | import pyznap.pyzfs as zfs 24 | from pyznap.utils import read_config, parse_name 25 | from test_utils import open_ssh 26 | from pyznap.ssh import SSH 27 | from pyznap.clean import clean_config 28 | from pyznap.take import take_config 29 | from pyznap.send import send_config 30 | from pyznap.process import run, DatasetNotFoundError 31 | 32 | 33 | logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', 34 | datefmt='%b %d %H:%M:%S') 35 | logger = logging.getLogger(__name__) 36 | logging.getLogger("paramiko").setLevel(logging.ERROR) 37 | 38 | def randomword(length): 39 | letters = string.ascii_lowercase 40 | return ''.join(random.choice(letters) for i in range(length)) 41 | 42 | # ssh connection to dest 43 | USER = 'root' 44 | HOST = '127.0.0.1' 45 | PORT = 22 46 | KEY = None 47 | 48 | @pytest.fixture(scope='module') 49 | def zpools(): 50 | """Creates two temporary zpools to be called from test functions, source is local and dest on 51 | remote ssh location. Yields the two pool names and destroys them after testing.""" 52 | 53 | zpool = '/sbin/zpool' 54 | _word = randomword(8) 55 | pool0 = 'pyznap_source_' + _word 56 | pool1 = 'pyznap_dest_' + _word 57 | 58 | sftp_filename = '/tmp/' + randomword(10) 59 | 60 | # ssh arguments for zfs functions 61 | ssh = SSH(USER, HOST, port=PORT, key=KEY) 62 | # need paramiko for sftp file 63 | sshclient = open_ssh(USER, HOST, port=PORT, key=KEY) 64 | sftp = sshclient.open_sftp() 65 | 66 | # Create temporary file on which the source zpool is created. 
Manually create sftp file 67 | with NamedTemporaryFile() as file0, sftp.open(sftp_filename, 'w') as file1: 68 | filename0 = file0.name 69 | filename1 = sftp_filename 70 | 71 | # Fix size to 100Mb 72 | file0.seek(100*1024**2-1) 73 | file0.write(b'0') 74 | file0.seek(0) 75 | file1.seek(100*1024**2-1) 76 | file1.write(b'0') 77 | file1.seek(0) 78 | 79 | # Create temporary test pools 80 | try: 81 | run([zpool, 'create', pool0, filename0]) 82 | except sp.CalledProcessError as err: 83 | logger.error(err) 84 | return 85 | 86 | try: 87 | run([zpool, 'create', pool1, filename1], ssh=ssh) 88 | except sp.CalledProcessError as err: 89 | logger.error(err) 90 | return 91 | 92 | try: 93 | fs0 = zfs.open(pool0) 94 | fs1 = zfs.open(pool1, ssh=ssh) 95 | assert fs0.name == pool0 96 | assert fs1.name == pool1 97 | except (DatasetNotFoundError, AssertionError, Exception) as err: 98 | logger.error(err) 99 | else: 100 | yield fs0, fs1 101 | 102 | # Destroy temporary test pools 103 | try: 104 | run([zpool, 'destroy', pool0]) 105 | except sp.CalledProcessError as err: 106 | logger.error(err) 107 | 108 | try: 109 | run([zpool, 'destroy', pool1], ssh=ssh) 110 | except sp.CalledProcessError as err: 111 | logger.error(err) 112 | 113 | # Delete tempfile on dest 114 | sftp.remove(sftp_filename) 115 | sftp.close() 116 | ssh.close() 117 | 118 | 119 | class TestSnapshot(object): 120 | @pytest.mark.dependency() 121 | def test_take_snapshot(self, zpools): 122 | _, fs = zpools 123 | 124 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs), 'key': KEY, 'frequent': 1, 'hourly': 1, 125 | 'daily': 1, 'weekly': 1, 'monthly': 1, 'yearly': 1, 'snap': True}] 126 | take_config(config) 127 | take_config(config) 128 | 129 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 130 | for snap in fs.snapshots(): 131 | snap_type = snap.name.split('_')[-1] 132 | snapshots[snap_type].append(snap) 133 | 134 | for snap_type, snaps in snapshots.items(): 135 | assert len(snaps) 
== 1 136 | 137 | 138 | @pytest.mark.dependency(depends=['TestSnapshot::test_take_snapshot']) 139 | def test_clean_snapshot(self, zpools): 140 | _, fs = zpools 141 | 142 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs), 'key': KEY, 'frequent': 0, 'hourly': 0, 143 | 'daily': 0, 'weekly': 0, 'monthly': 0, 'yearly': 0, 'clean': True}] 144 | clean_config(config) 145 | 146 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 147 | for snap in fs.snapshots(): 148 | snap_type = snap.name.split('_')[-1] 149 | snapshots[snap_type].append(snap) 150 | 151 | for snap_type, snaps in snapshots.items(): 152 | assert len(snaps) == config[0][snap_type] 153 | 154 | 155 | @pytest.mark.dependency(depends=['TestSnapshot::test_clean_snapshot']) 156 | def test_take_snapshot_recursive(self, zpools): 157 | _, fs = zpools 158 | ssh = fs.ssh 159 | 160 | fs.destroy(force=True) 161 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs), 'key': KEY, 'frequent': 1, 'hourly': 1, 162 | 'daily': 1, 'weekly': 1, 'monthly': 1, 'yearly': 1, 'snap': True}] 163 | take_config(config) 164 | fs.snapshots()[-1].destroy(force=True) 165 | fs.snapshots()[-1].destroy(force=True) 166 | 167 | sub1 = zfs.create('{:s}/sub1'.format(fs.name), ssh=ssh) 168 | abc = zfs.create('{:s}/sub1/abc'.format(fs.name), ssh=ssh) 169 | sub1_abc = zfs.create('{:s}/sub1_abc'.format(fs.name), ssh=ssh) 170 | config += [{'name': 'ssh:{:d}:{}/sub1'.format(PORT, fs), 'key': KEY, 'frequent': 1, 'hourly': 1, 171 | 'daily': 1, 'weekly': 1, 'monthly': 1, 'yearly': 1, 'snap': False}] 172 | take_config(config) 173 | 174 | # Check fs 175 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 176 | for snap in fs.snapshots(): 177 | snap_type = snap.name.split('_')[-1] 178 | snapshots[snap_type].append(snap) 179 | 180 | for snap_type, snaps in snapshots.items(): 181 | assert len(snaps) == config[0][snap_type] 182 | 183 | # Check sub1 184 | snapshots = 
{'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 185 | for snap in sub1.snapshots(): 186 | snap_type = snap.name.split('_')[-1] 187 | snapshots[snap_type].append(snap) 188 | 189 | for snap_type, snaps in snapshots.items(): 190 | assert len(snaps) == config[0][snap_type] 191 | 192 | # Check abc 193 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 194 | for snap in abc.snapshots(): 195 | snap_type = snap.name.split('_')[-1] 196 | snapshots[snap_type].append(snap) 197 | 198 | for snap_type, snaps in snapshots.items(): 199 | assert len(snaps) == config[0][snap_type] 200 | 201 | # Check sub1_abc 202 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 203 | for snap in sub1_abc.snapshots(): 204 | snap_type = snap.name.split('_')[-1] 205 | snapshots[snap_type].append(snap) 206 | 207 | for snap_type, snaps in snapshots.items(): 208 | assert len(snaps) == config[0][snap_type] 209 | 210 | 211 | @pytest.mark.dependency(depends=['TestSnapshot::test_take_snapshot_recursive']) 212 | def test_clean_recursive(self, zpools): 213 | _, fs = zpools 214 | ssh = fs.ssh 215 | 216 | fs.destroy(force=True) 217 | sub1 = zfs.create('{:s}/sub1'.format(fs.name), ssh=ssh) 218 | abc = zfs.create('{:s}/sub1/abc'.format(fs.name), ssh=ssh) 219 | abc_efg = zfs.create('{:s}/sub1/abc_efg'.format(fs.name), ssh=ssh) 220 | sub2 = zfs.create('{:s}/sub2'.format(fs.name), ssh=ssh) 221 | efg = zfs.create('{:s}/sub2/efg'.format(fs.name), ssh=ssh) 222 | hij = zfs.create('{:s}/sub2/efg/hij'.format(fs.name), ssh=ssh) 223 | klm = zfs.create('{:s}/sub2/efg/hij/klm'.format(fs.name), ssh=ssh) 224 | sub3 = zfs.create('{:s}/sub3'.format(fs.name), ssh=ssh) 225 | 226 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs), 'key': KEY, 'frequent': 1, 'hourly': 1, 227 | 'daily': 1, 'weekly': 1, 'monthly': 1, 'yearly': 1, 'snap': True}] 228 | take_config(config) 229 | 230 | config = 
[{'name': 'ssh:{:d}:{}'.format(PORT, fs), 'key': KEY, 'frequent': 1, 'hourly': 0, 231 | 'daily': 1, 'weekly': 0, 'monthly': 0, 'yearly': 0, 'clean': True}, 232 | {'name': 'ssh:{:d}:{}/sub2'.format(PORT, fs), 'key': KEY, 'frequent': 0, 233 | 'hourly': 1, 'daily': 0, 'weekly': 1, 'monthly': 0, 'yearly': 1, 'clean': True}, 234 | {'name': 'ssh:{:d}:{}/sub3'.format(PORT, fs), 'key': KEY, 'frequent': 1, 235 | 'hourly': 0, 'daily': 1, 'weekly': 0, 'monthly': 1, 'yearly': 0, 'clean': False}, 236 | {'name': 'ssh:{:d}:{}/sub1/abc'.format(PORT, fs), 'key': KEY, 'frequent': 0, 237 | 'hourly': 0,'daily': 0, 'weekly': 1, 'monthly': 1, 'yearly': 1, 'clean': True}, 238 | {'name': 'ssh:{:d}:{}/sub2/efg/hij'.format(PORT, fs), 'key': KEY, 'frequent': 0, 239 | 'hourly': 0, 'daily': 0, 'weekly': 0, 'monthly': 0, 'yearly': 0, 'clean': True}] 240 | clean_config(config) 241 | 242 | # Check parent filesystem 243 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 244 | for snap in fs.snapshots(): 245 | snap_type = snap.name.split('_')[-1] 246 | snapshots[snap_type].append(snap) 247 | 248 | for snap_type, snaps in snapshots.items(): 249 | assert len(snaps) == config[0][snap_type] 250 | # Check sub1 251 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 252 | for snap in sub1.snapshots(): 253 | snap_type = snap.name.split('_')[-1] 254 | snapshots[snap_type].append(snap) 255 | 256 | for snap_type, snaps in snapshots.items(): 257 | assert len(snaps) == config[0][snap_type] 258 | # Check sub1/abc 259 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 260 | for snap in abc.snapshots(): 261 | snap_type = snap.name.split('_')[-1] 262 | snapshots[snap_type].append(snap) 263 | 264 | for snap_type, snaps in snapshots.items(): 265 | assert len(snaps) == config[3][snap_type] 266 | # Check sub1/abc_efg 267 | snapshots = {'frequent': [], 'hourly': [], 
'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 268 | for snap in abc_efg.snapshots(): 269 | snap_type = snap.name.split('_')[-1] 270 | snapshots[snap_type].append(snap) 271 | 272 | for snap_type, snaps in snapshots.items(): 273 | assert len(snaps) == config[0][snap_type] 274 | # Check sub2 275 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 276 | for snap in sub2.snapshots(): 277 | snap_type = snap.name.split('_')[-1] 278 | snapshots[snap_type].append(snap) 279 | 280 | for snap_type, snaps in snapshots.items(): 281 | assert len(snaps) == config[1][snap_type] 282 | # Check sub2/efg 283 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 284 | for snap in efg.snapshots(): 285 | snap_type = snap.name.split('_')[-1] 286 | snapshots[snap_type].append(snap) 287 | 288 | for snap_type, snaps in snapshots.items(): 289 | assert len(snaps) == config[1][snap_type] 290 | # Check sub2/efg/hij 291 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 292 | for snap in hij.snapshots(): 293 | snap_type = snap.name.split('_')[-1] 294 | snapshots[snap_type].append(snap) 295 | 296 | for snap_type, snaps in snapshots.items(): 297 | assert len(snaps) == config[4][snap_type] 298 | # Check sub2/efg/hij/klm 299 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 300 | for snap in klm.snapshots(): 301 | snap_type = snap.name.split('_')[-1] 302 | snapshots[snap_type].append(snap) 303 | 304 | for snap_type, snaps in snapshots.items(): 305 | assert len(snaps) == config[4][snap_type] 306 | # Check sub3 307 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 308 | for snap in sub3.snapshots(): 309 | snap_type = snap.name.split('_')[-1] 310 | snapshots[snap_type].append(snap) 311 | 312 | for snap_type, snaps in snapshots.items(): 313 | assert 
len(snaps) == 1 314 | 315 | 316 | class TestSending(object): 317 | @pytest.mark.dependency() 318 | def test_send_full(self, zpools): 319 | """Checks if send_snap totally replicates a filesystem""" 320 | fs0, fs1 = zpools 321 | ssh = fs1.ssh 322 | 323 | fs0.destroy(force=True) 324 | fs1.destroy(force=True) 325 | 326 | fs0.snapshot('snap0') 327 | zfs.create('{:s}/sub1'.format(fs0.name)) 328 | fs0.snapshot('snap1', recursive=True) 329 | zfs.create('{:s}/sub2'.format(fs0.name)) 330 | fs0.snapshot('snap2', recursive=True) 331 | zfs.create('{:s}/sub3'.format(fs0.name)) 332 | fs0.snapshot('snap3', recursive=True) 333 | fs0.snapshot('snap4', recursive=True) 334 | fs0.snapshot('snap5', recursive=True) 335 | zfs.create('{:s}/sub3/abc'.format(fs0.name)) 336 | fs0.snapshot('snap6', recursive=True) 337 | zfs.create('{:s}/sub3/abc_abc'.format(fs0.name)) 338 | fs0.snapshot('snap7', recursive=True) 339 | zfs.create('{:s}/sub3/efg'.format(fs0.name)) 340 | fs0.snapshot('snap8', recursive=True) 341 | fs0.snapshot('snap9', recursive=True) 342 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 343 | send_config(config) 344 | 345 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 346 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 347 | assert set(fs0_children) == set(fs1_children) 348 | 349 | 350 | @pytest.mark.dependency(depends=['TestSending::test_send_full']) 351 | def test_send_incremental(self, zpools): 352 | fs0, fs1 = zpools 353 | ssh = fs1.ssh 354 | 355 | fs0.destroy(force=True) 356 | fs1.destroy(force=True) 357 | 358 | fs0.snapshot('snap0', recursive=True) 359 | zfs.create('{:s}/sub1'.format(fs0.name)) 360 | fs0.snapshot('snap1', recursive=True) 361 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 362 | send_config(config) 363 | 
fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 364 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 365 | assert set(fs0_children) == set(fs1_children) 366 | 367 | zfs.create('{:s}/sub2'.format(fs0.name)) 368 | fs0.snapshot('snap2', recursive=True) 369 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 370 | send_config(config) 371 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 372 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 373 | assert set(fs0_children) == set(fs1_children) 374 | 375 | zfs.create('{:s}/sub3'.format(fs0.name)) 376 | fs0.snapshot('snap3', recursive=True) 377 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 378 | send_config(config) 379 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 380 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 381 | assert set(fs0_children) == set(fs1_children) 382 | 383 | 384 | @pytest.mark.dependency(depends=['TestSending::test_send_incremental']) 385 | def test_send_delete_snapshot(self, zpools): 386 | fs0, fs1 = zpools 387 | ssh = fs1.ssh 388 | 389 | # Delete recent snapshots on dest 390 | fs1.snapshots()[-1].destroy(force=True) 391 | fs1.snapshots()[-1].destroy(force=True) 392 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 393 | send_config(config) 394 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 395 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 396 | 
assert set(fs0_children) == set(fs1_children) 397 | 398 | # Delete recent snapshot on source 399 | fs0.snapshot('snap4', recursive=True) 400 | send_config(config) 401 | fs0.snapshots()[-1].destroy(force=True) 402 | fs0.snapshot('snap5', recursive=True) 403 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 404 | send_config(config) 405 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 406 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 407 | assert set(fs0_children) == set(fs1_children) 408 | 409 | 410 | @pytest.mark.dependency(depends=['TestSending::test_send_delete_snapshot']) 411 | def test_send_delete_sub(self, zpools): 412 | fs0, fs1 = zpools 413 | ssh = fs1.ssh 414 | 415 | # Delete subfilesystems 416 | sub3 = fs1.filesystems()[-1] 417 | sub3.destroy(force=True) 418 | fs0.snapshot('snap6', recursive=True) 419 | sub2 = fs1.filesystems()[-1] 420 | sub2.destroy(force=True) 421 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 422 | send_config(config) 423 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 424 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 425 | assert set(fs0_children) == set(fs1_children) 426 | 427 | 428 | @pytest.mark.dependency(depends=['TestSending::test_send_delete_sub']) 429 | def test_send_delete_old(self, zpools): 430 | fs0, fs1 = zpools 431 | ssh = fs1.ssh 432 | 433 | # Delete old snapshot on source 434 | fs0.snapshots()[0].destroy(force=True) 435 | fs0.snapshot('snap7', recursive=True) 436 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': None}] 437 | send_config(config) 438 | fs0_children = [child.name.replace(fs0.name, '') for 
child in zfs.find(fs0.name, types=['all'])[1:]] 439 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 440 | assert not (set(fs0_children) == set(fs1_children)) 441 | # Assert that snap0 was not deleted from fs1 442 | for child in set(fs1_children) - set(fs0_children): 443 | assert child.endswith('snap0') 444 | 445 | 446 | @pytest.mark.dependency() 447 | def test_send_exclude(self, zpools): 448 | """Checks if send_snap totally replicates a filesystem""" 449 | fs0, fs1 = zpools 450 | ssh = fs1.ssh 451 | fs0.destroy(force=True) 452 | fs1.destroy(force=True) 453 | 454 | exclude = ['*/sub1', '*/sub3/abc', '*/sub3/efg'] 455 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'exclude': [exclude]}] 456 | 457 | zfs.create('{:s}/sub1'.format(fs0.name)) 458 | zfs.create('{:s}/sub2'.format(fs0.name)) 459 | zfs.create('{:s}/sub3'.format(fs0.name)) 460 | zfs.create('{:s}/sub3/abc'.format(fs0.name)) 461 | zfs.create('{:s}/sub3/abc_abc'.format(fs0.name)) 462 | zfs.create('{:s}/sub3/efg'.format(fs0.name)) 463 | fs0.snapshot('snap', recursive=True) 464 | send_config(config) 465 | 466 | fs0_children = set([child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]]) 467 | fs1_children = set([child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]]) 468 | # remove unwanted datasets/snapshots 469 | for match in exclude: 470 | fs0_children -= set(fnmatch.filter(fs0_children, match)) 471 | fs0_children -= set(fnmatch.filter(fs0_children, match + '@snap')) 472 | 473 | assert set(fs0_children) == set(fs1_children) 474 | 475 | 476 | @pytest.mark.dependency() 477 | def test_send_compress(self, zpools): 478 | """Checks if send_snap totally replicates a filesystem""" 479 | fs0, fs1 = zpools 480 | ssh = fs1.ssh 481 | 482 | fs0.destroy(force=True) 483 | fs1.destroy(force=True) 484 | 485 | fs0.snapshot('snap0') 486 | 
zfs.create('{:s}/sub1'.format(fs0.name)) 487 | fs0.snapshot('snap1', recursive=True) 488 | zfs.create('{:s}/sub2'.format(fs0.name)) 489 | fs0.snapshot('snap2', recursive=True) 490 | fs0.snapshot('snap3', recursive=True) 491 | zfs.create('{:s}/sub2/abc'.format(fs0.name)) 492 | fs0.snapshot('snap4', recursive=True) 493 | fs0.snapshot('snap5', recursive=True) 494 | 495 | for compression in ['none', 'abc', 'lzop', 'gzip', 'pigz', 'bzip2', 'xz', 'lz4']: 496 | fs1.destroy(force=True) 497 | config = [{'name': fs0.name, 'dest': ['ssh:{:d}:{}'.format(PORT, fs1)], 'dest_keys': [KEY], 'compress': [compression]}] 498 | send_config(config) 499 | 500 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 501 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 502 | assert set(fs0_children) == set(fs1_children) 503 | 504 | 505 | class TestSendingPull(object): 506 | """Checks if snapshots can be pulled from a remote source""" 507 | 508 | @pytest.mark.dependency() 509 | def test_send_full(self, zpools): 510 | """Checks if send_snap totally replicates a filesystem""" 511 | fs1, fs0 = zpools # here fs0 is the remote pool 512 | ssh = fs0.ssh 513 | 514 | fs0.destroy(force=True) 515 | fs1.destroy(force=True) 516 | 517 | fs0.snapshot('snap0') 518 | zfs.create('{:s}/sub1'.format(fs0.name), ssh=ssh) 519 | fs0.snapshot('snap1', recursive=True) 520 | zfs.create('{:s}/sub2'.format(fs0.name), ssh=ssh) 521 | fs0.snapshot('snap2', recursive=True) 522 | zfs.create('{:s}/sub3'.format(fs0.name), ssh=ssh) 523 | fs0.snapshot('snap3', recursive=True) 524 | fs0.snapshot('snap4', recursive=True) 525 | fs0.snapshot('snap5', recursive=True) 526 | zfs.create('{:s}/sub3/abc'.format(fs0.name), ssh=ssh) 527 | fs0.snapshot('snap6', recursive=True) 528 | zfs.create('{:s}/sub3/abc_abc'.format(fs0.name), ssh=ssh) 529 | fs0.snapshot('snap7', recursive=True) 530 | zfs.create('{:s}/sub3/efg'.format(fs0.name), 
ssh=ssh) 531 | fs0.snapshot('snap8', recursive=True) 532 | fs0.snapshot('snap9', recursive=True) 533 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 534 | send_config(config) 535 | 536 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 537 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 538 | assert set(fs0_children) == set(fs1_children) 539 | 540 | 541 | @pytest.mark.dependency(depends=['TestSendingPull::test_send_full']) 542 | def test_send_incremental(self, zpools): 543 | fs1, fs0 = zpools # here fs0 is the remote pool 544 | ssh = fs0.ssh 545 | 546 | fs0.destroy(force=True) 547 | fs1.destroy(force=True) 548 | 549 | fs0.snapshot('snap0', recursive=True) 550 | zfs.create('{:s}/sub1'.format(fs0.name), ssh=ssh) 551 | fs0.snapshot('snap1', recursive=True) 552 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 553 | send_config(config) 554 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 555 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 556 | assert set(fs0_children) == set(fs1_children) 557 | 558 | zfs.create('{:s}/sub2'.format(fs0.name), ssh=ssh) 559 | fs0.snapshot('snap2', recursive=True) 560 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 561 | send_config(config) 562 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 563 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 564 | assert set(fs0_children) == set(fs1_children) 565 | 566 | zfs.create('{:s}/sub3'.format(fs0.name), ssh=ssh) 567 | fs0.snapshot('snap3', recursive=True) 568 | config = [{'name': 
'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 569 | send_config(config) 570 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 571 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 572 | assert set(fs0_children) == set(fs1_children) 573 | 574 | 575 | @pytest.mark.dependency(depends=['TestSendingPull::test_send_incremental']) 576 | def test_send_delete_snapshot(self, zpools): 577 | fs1, fs0 = zpools # here fs0 is the remote pool 578 | ssh = fs0.ssh 579 | 580 | # Delete recent snapshots on dest 581 | fs1.snapshots()[-1].destroy(force=True) 582 | fs1.snapshots()[-1].destroy(force=True) 583 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 584 | send_config(config) 585 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 586 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 587 | assert set(fs0_children) == set(fs1_children) 588 | 589 | # Delete recent snapshot on source 590 | fs0.snapshot('snap4', recursive=True) 591 | send_config(config) 592 | fs0.snapshots()[-1].destroy(force=True) 593 | fs0.snapshot('snap5', recursive=True) 594 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 595 | send_config(config) 596 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 597 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 598 | assert set(fs0_children) == set(fs1_children) 599 | 600 | 601 | @pytest.mark.dependency(depends=['TestSendingPull::test_send_delete_snapshot']) 602 | def test_send_delete_sub(self, zpools): 603 | fs1, fs0 = zpools # here fs0 is the remote pool 604 | ssh = fs0.ssh 605 | 606 | 
# Delete subfilesystems 607 | sub3 = fs1.filesystems()[-1] 608 | sub3.destroy(force=True) 609 | fs0.snapshot('snap6', recursive=True) 610 | sub2 = fs1.filesystems()[-1] 611 | sub2.destroy(force=True) 612 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 613 | send_config(config) 614 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 615 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 616 | assert set(fs0_children) == set(fs1_children) 617 | 618 | 619 | @pytest.mark.dependency(depends=['TestSendingPull::test_send_delete_sub']) 620 | def test_send_delete_old(self, zpools): 621 | fs1, fs0 = zpools # here fs0 is the remote pool 622 | ssh = fs0.ssh 623 | 624 | # Delete old snapshot on source 625 | fs0.snapshots()[0].destroy(force=True) 626 | fs0.snapshot('snap7', recursive=True) 627 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': None}] 628 | send_config(config) 629 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 630 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 631 | assert not (set(fs0_children) == set(fs1_children)) 632 | # Assert that snap0 was not deleted from fs1 633 | for child in set(fs1_children) - set(fs0_children): 634 | assert child.endswith('snap0') 635 | 636 | 637 | @pytest.mark.dependency() 638 | def test_send_exclude(self, zpools): 639 | """Checks if send_snap totally replicates a filesystem""" 640 | fs1, fs0 = zpools # here fs0 is the remote pool 641 | ssh = fs0.ssh 642 | fs0.destroy(force=True) 643 | fs1.destroy(force=True) 644 | 645 | exclude = ['*/sub1', '*/sub3/abc', '*/sub3/efg'] 646 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'dest': [fs1.name], 'exclude': [exclude]}] 647 | 648 | 
zfs.create('{:s}/sub1'.format(fs0.name), ssh=ssh) 649 | zfs.create('{:s}/sub2'.format(fs0.name), ssh=ssh) 650 | zfs.create('{:s}/sub3'.format(fs0.name), ssh=ssh) 651 | zfs.create('{:s}/sub3/abc'.format(fs0.name), ssh=ssh) 652 | zfs.create('{:s}/sub3/abc_abc'.format(fs0.name), ssh=ssh) 653 | zfs.create('{:s}/sub3/efg'.format(fs0.name), ssh=ssh) 654 | fs0.snapshot('snap', recursive=True) 655 | send_config(config) 656 | 657 | fs0_children = set([child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]]) 658 | fs1_children = set([child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]]) 659 | # remove unwanted datasets/snapshots 660 | for match in exclude: 661 | fs0_children -= set(fnmatch.filter(fs0_children, match)) 662 | fs0_children -= set(fnmatch.filter(fs0_children, match + '@snap')) 663 | 664 | assert set(fs0_children) == set(fs1_children) 665 | 666 | 667 | @pytest.mark.dependency() 668 | def test_send_compress(self, zpools): 669 | """Checks if send_snap totally replicates a filesystem""" 670 | fs1, fs0 = zpools # here fs0 is the remote pool 671 | ssh = fs0.ssh 672 | 673 | fs0.destroy(force=True) 674 | fs1.destroy(force=True) 675 | 676 | fs0.snapshot('snap0') 677 | zfs.create('{:s}/sub1'.format(fs0.name), ssh=ssh) 678 | fs0.snapshot('snap1', recursive=True) 679 | zfs.create('{:s}/sub2'.format(fs0.name), ssh=ssh) 680 | fs0.snapshot('snap2', recursive=True) 681 | fs0.snapshot('snap3', recursive=True) 682 | zfs.create('{:s}/sub2/abc'.format(fs0.name), ssh=ssh) 683 | fs0.snapshot('snap4', recursive=True) 684 | fs0.snapshot('snap5', recursive=True) 685 | 686 | for compression in ['none', 'lzop', 'lz4']: 687 | fs1.destroy(force=True) 688 | config = [{'name': 'ssh:{:d}:{}'.format(PORT, fs0), 'key': KEY, 'dest': [fs1.name], 'compress': [compression]}] 689 | send_config(config) 690 | 691 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'], ssh=ssh)[1:]] 692 | 
fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 693 | assert set(fs0_children) == set(fs1_children) 694 | -------------------------------------------------------------------------------- /tests/test_pyznap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env pytest -v 2 | """ 3 | pyznap.test_pyznap 4 | ~~~~~~~~~~~~~~ 5 | 6 | Test pyznap over time. 7 | 8 | :copyright: (c) 2018-2019 by Yannick Boetzel. 9 | :license: GPLv3, see LICENSE for more details. 10 | """ 11 | 12 | import subprocess as sp 13 | import sys 14 | import os 15 | import logging 16 | import random 17 | import string 18 | from subprocess import Popen, PIPE 19 | from tempfile import NamedTemporaryFile 20 | from datetime import datetime, timedelta 21 | import pytest 22 | 23 | import pyznap.pyzfs as zfs 24 | from pyznap.utils import exists 25 | from pyznap.process import DatasetNotFoundError 26 | 27 | 28 | logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', 29 | datefmt='%b %d %H:%M:%S') 30 | logger = logging.getLogger(__name__) 31 | 32 | assert exists('faketime') 33 | 34 | def randomword(length): 35 | letters = string.ascii_lowercase 36 | return ''.join(random.choice(letters) for i in range(length)) 37 | 38 | ZPOOL = '/sbin/zpool' 39 | _word = randomword(8) 40 | POOL0 = 'pyznap_source_' + _word 41 | POOL1 = 'pyznap_dest_' + _word 42 | 43 | N_FREQUENT = 30 44 | N_HOURLY = 24 45 | N_DAILY = 14 46 | N_WEEKLY = 8 47 | N_MONTHLY = 12 48 | N_YEARLY = 3 49 | 50 | SNAPSHOTS_REF = {'frequent': N_FREQUENT, 'hourly': N_HOURLY, 'daily': N_DAILY, 'weekly': N_WEEKLY, 51 | 'monthly': N_MONTHLY, 'yearly': N_YEARLY} 52 | 53 | 54 | @pytest.fixture(scope='module') 55 | def zpools(): 56 | """Creates two temporary zpools to be called from test functions. 
Yields the two pool names 57 | and destroys them after testing.""" 58 | 59 | # Create temporary files on which the zpools are created 60 | with NamedTemporaryFile() as file0, NamedTemporaryFile() as file1: 61 | filename0 = file0.name 62 | filename1 = file1.name 63 | 64 | # Fix size to 100Mb 65 | file0.seek(100*1024**2-1) 66 | file0.write(b'0') 67 | file0.seek(0) 68 | file1.seek(100*1024**2-1) 69 | file1.write(b'0') 70 | file1.seek(0) 71 | 72 | # Create temporary test pools 73 | for pool, filename in zip([POOL0, POOL1], [filename0, filename1]): 74 | try: 75 | sp.check_call([ZPOOL, 'create', pool, filename]) 76 | except sp.CalledProcessError as err: 77 | logger.error(err) 78 | return 79 | 80 | try: 81 | fs0 = zfs.open(POOL0) 82 | fs1 = zfs.open(POOL1) 83 | assert fs0.name == POOL0 84 | assert fs1.name == POOL1 85 | except (DatasetNotFoundError, AssertionError, Exception) as err: 86 | logger.error(err) 87 | else: 88 | yield fs0, fs1 89 | 90 | # Destroy temporary test pools 91 | for pool in [POOL0, POOL1]: 92 | try: 93 | sp.check_call([ZPOOL, 'destroy', pool]) 94 | except sp.CalledProcessError as err: 95 | logger.error(err) 96 | 97 | @pytest.fixture(scope='module') 98 | def config(): 99 | """Creates a temporary config file and yields its filename""" 100 | 101 | with NamedTemporaryFile('w') as file: 102 | file.write(f'[{POOL0}]\n' 103 | f'frequent = {N_FREQUENT}\n' 104 | f'hourly = {N_HOURLY}\n' 105 | f'daily = {N_DAILY}\n' 106 | f'weekly = {N_WEEKLY}\n' 107 | f'monthly = {N_MONTHLY}\n' 108 | f'yearly = {N_YEARLY}\n' 109 | f'snap = yes\n' 110 | f'clean = yes\n\n') 111 | file.seek(0) 112 | yield file.name 113 | 114 | 115 | @pytest.fixture(scope='module') 116 | def config_send(): 117 | """Creates a temporary config file and yields its filename""" 118 | 119 | with NamedTemporaryFile('w') as file: 120 | file.write(f'[{POOL0}]\n' 121 | f'frequent = {N_FREQUENT}\n' 122 | f'hourly = {N_HOURLY}\n' 123 | f'daily = {N_DAILY}\n' 124 | f'weekly = {N_WEEKLY}\n' 125 | f'monthly = 
{N_MONTHLY}\n' 126 | f'yearly = {N_YEARLY}\n' 127 | f'snap = yes\n' 128 | f'clean = yes\n' 129 | f'dest = {POOL1}\n' 130 | 131 | f'[{POOL1}]\n' 132 | f'frequent = {N_FREQUENT}\n' 133 | f'hourly = {N_HOURLY}\n' 134 | f'daily = {N_DAILY}\n' 135 | f'weekly = {N_WEEKLY}\n' 136 | f'monthly = {N_MONTHLY}\n' 137 | f'yearly = {N_YEARLY}\n' 138 | f'clean = yes\n\n') 139 | file.seek(0) 140 | yield file.name 141 | 142 | 143 | @pytest.mark.slow 144 | class TestCycle(object): 145 | def test_2_hours(self, zpools, config): 146 | """Tests pyznap over 2 hours and checks if the correct amount of 'frequent' snapshots are taken""" 147 | 148 | fs, _ = zpools 149 | fs.destroy(force=True) 150 | 151 | start_date = datetime(2014, 1, 1) 152 | dates = [start_date + i * timedelta(minutes=1) for i in range(60*2)] 153 | 154 | for n,date in enumerate(dates): 155 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 156 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 157 | 158 | # take snaps every 1min 159 | _, _ = Popen(pyznap_snap).communicate() 160 | 161 | # get all snapshots 162 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 163 | for snap in fs.snapshots(): 164 | snap_type = snap.name.split('_')[-1] 165 | snapshots[snap_type].append(snap) 166 | # check if there are not too many snapshots taken 167 | for snap_type, snaps in snapshots.items(): 168 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 169 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 170 | if n+1 >= N_FREQUENT: 171 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 172 | 173 | 174 | def test_2_days(self, zpools, config): 175 | """Tests pyznap over 2 days and checks if the correct amount of 'frequent' snapshots are taken""" 176 | 177 | fs, _ = zpools 178 | fs.destroy(force=True) 179 | 180 | start_date = datetime(2014, 1, 1) 181 | dates = [start_date + i * timedelta(minutes=15) for i in range(4*24*2)] 182 | 183 | 
for n,date in enumerate(dates): 184 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 185 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 186 | 187 | # take snaps every 15min 188 | _, _ = Popen(pyznap_snap).communicate() 189 | 190 | # get all snapshots 191 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 192 | for snap in fs.snapshots(): 193 | snap_type = snap.name.split('_')[-1] 194 | snapshots[snap_type].append(snap) 195 | # check if there are not too many snapshots taken 196 | for snap_type, snaps in snapshots.items(): 197 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 198 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 199 | if n+1 >= N_FREQUENT: 200 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 201 | 202 | 203 | def test_1_week(self, zpools, config): 204 | """Tests pyznap over 1 week and checks if the correct amount of 'frequent' & hourly' 205 | snapshots are taken""" 206 | 207 | fs, _ = zpools 208 | fs.destroy(force=True) 209 | 210 | start_date = datetime(2014, 1, 1) 211 | dates = [start_date + i * timedelta(hours=1) for i in range(24*7)] 212 | 213 | for n,date in enumerate(dates): 214 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 215 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 216 | 217 | # take snaps every 1h 218 | _, _ = Popen(pyznap_snap).communicate() 219 | 220 | # get all snapshots 221 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 222 | for snap in fs.snapshots(): 223 | snap_type = snap.name.split('_')[-1] 224 | snapshots[snap_type].append(snap) 225 | # check if there are not too many snapshots taken 226 | for snap_type, snaps in snapshots.items(): 227 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 228 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 229 | if n+1 >= N_FREQUENT: 230 | assert 
len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 231 | # check if after N_HOURLY runs there are N_HOURLY 'hourly' snapshots 232 | if n+1 >= N_HOURLY: 233 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 234 | 235 | 236 | def test_8_weeks(self, zpools, config): 237 | """Tests pyznap over 8 weeks and checks if the correct amount of 'frequent', 'hourly' & 238 | 'daily' snapshots are taken""" 239 | 240 | fs, _ = zpools 241 | fs.destroy(force=True) 242 | 243 | start_date = datetime(2014, 1, 1) 244 | dates = [start_date + i * timedelta(days=1) for i in range(7*8)] 245 | 246 | for n,date in enumerate(dates): 247 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 248 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 249 | 250 | # take snaps every 1d 251 | _, _ = Popen(pyznap_snap).communicate() 252 | 253 | # get all snapshots 254 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 255 | for snap in fs.snapshots(): 256 | snap_type = snap.name.split('_')[-1] 257 | snapshots[snap_type].append(snap) 258 | # check if there are not too many snapshots taken 259 | for snap_type, snaps in snapshots.items(): 260 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 261 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 262 | if n+1 >= N_FREQUENT: 263 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 264 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 265 | if n+1 >= N_HOURLY: 266 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 267 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 268 | if n+1 >= N_DAILY: 269 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 270 | 271 | 272 | def test_6_months(self, zpools, config): 273 | """Tests pyznap over 6 months and checks if the correct amount of 'frequent', 'hourly', 274 | 'daily' & 'weekly' snapshots are taken""" 275 | 276 | fs, _ = zpools 277 | 
fs.destroy(force=True) 278 | 279 | start_date = datetime(2014, 1, 1) 280 | dates = [start_date + i * timedelta(days=7) for i in range(4*6)] 281 | 282 | for n,date in enumerate(dates): 283 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 284 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 285 | 286 | # take snaps every 7d 287 | _, _ = Popen(pyznap_snap).communicate() 288 | 289 | # get all snapshots 290 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 291 | for snap in fs.snapshots(): 292 | snap_type = snap.name.split('_')[-1] 293 | snapshots[snap_type].append(snap) 294 | # check if there are not too many snapshots taken 295 | for snap_type, snaps in snapshots.items(): 296 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 297 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 298 | if n+1 >= N_FREQUENT: 299 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 300 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 301 | if n+1 >= N_HOURLY: 302 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 303 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 304 | if n+1 >= N_DAILY: 305 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 306 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 307 | if n+1 >= N_WEEKLY: 308 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 309 | 310 | 311 | def test_3_years(self, zpools, config): 312 | """Tests pyznap over 3 years and checks if the correct amount of 'frequent', 'hourly', 313 | 'daily', 'weekly' & 'monthly' snapshots are taken""" 314 | 315 | fs, _ = zpools 316 | fs.destroy(force=True) 317 | 318 | start_date = datetime(2014, 1, 1) 319 | dates = [start_date + i * timedelta(days=31) for i in range(12*3)] 320 | 321 | for n,date in enumerate(dates): 322 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 323 | pyznap_snap = 
faketime + ['pyznap', '--config', config, 'snap'] 324 | 325 | # take snaps every 31d 326 | _, _ = Popen(pyznap_snap).communicate() 327 | 328 | # get all snapshots 329 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 330 | for snap in fs.snapshots(): 331 | snap_type = snap.name.split('_')[-1] 332 | snapshots[snap_type].append(snap) 333 | # check if there are not too many snapshots taken 334 | for snap_type, snaps in snapshots.items(): 335 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 336 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 337 | if n+1 >= N_FREQUENT: 338 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 339 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 340 | if n+1 >= N_HOURLY: 341 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 342 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 343 | if n+1 >= N_DAILY: 344 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 345 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 346 | if n+1 >= N_WEEKLY: 347 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 348 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 349 | if n+1 >= N_MONTHLY: 350 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 351 | 352 | 353 | def test_50_years(self, zpools, config): 354 | """Tests pyznap over 50 years and checks if the correct amount of 'frequent', 'hourly', 355 | 'daily', 'weekly', 'monthly' & 'yearly' snapshots are taken""" 356 | 357 | fs, _ = zpools 358 | fs.destroy(force=True) 359 | 360 | # have to start at 1969 as faketime only goes from 1969 to 2068 361 | dates = [datetime(1969 + i, 1, 1) for i in range(50)] 362 | 363 | for n,date in enumerate(dates): 364 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 365 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 366 | 367 | # take 
snaps every 1y 368 | _, _ = Popen(pyznap_snap).communicate() 369 | 370 | # get all snapshots 371 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 372 | for snap in fs.snapshots(): 373 | snap_type = snap.name.split('_')[-1] 374 | snapshots[snap_type].append(snap) 375 | # check if there are not too many snapshots taken 376 | for snap_type, snaps in snapshots.items(): 377 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 378 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 379 | if n+1 >= N_FREQUENT: 380 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 381 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 382 | if n+1 >= N_HOURLY: 383 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 384 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 385 | if n+1 >= N_DAILY: 386 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 387 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 388 | if n+1 >= N_WEEKLY: 389 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 390 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 391 | if n+1 >= N_MONTHLY: 392 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 393 | # check if after N_YEARLY runs there are N_YEARLY 'yearly' snapshots 394 | if n+1 >= N_YEARLY: 395 | assert len(snapshots['yearly']) == SNAPSHOTS_REF['yearly'] 396 | 397 | 398 | @pytest.mark.slow 399 | class TestSend(object): 400 | def test_50_years(self, zpools, config_send): 401 | """Tests pyznap over 50 years and checks if snapshots are sent correctly""" 402 | 403 | fs0, fs1 = zpools 404 | fs0.destroy(force=True) 405 | fs1.destroy(force=True) 406 | zfs.create('{:s}/sub1'.format(fs0.name)) 407 | 408 | # have to start at 1969 as faketime only goes from 1969 to 2068 409 | dates = [datetime(1969 + i, 1, 1) for i in range(50)] 410 | 411 | for n,date in enumerate(dates): 
412 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 413 | pyznap_take = faketime + ['pyznap', '--config', config_send, 'snap', '--take'] 414 | pyznap_clean = faketime + ['pyznap', '--config', config_send, 'snap', '--clean'] 415 | pyznap_send = faketime + ['pyznap', '--config', config_send, 'send'] 416 | 417 | # take, send & clean snaps every 1y 418 | _, _ = Popen(pyznap_take).communicate() 419 | _, _ = Popen(pyznap_send).communicate() 420 | _, _ = Popen(pyznap_clean).communicate() 421 | 422 | # get all snapshots on fs0 423 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 424 | for snap in fs0.snapshots(): 425 | snap_type = snap.name.split('_')[-1] 426 | snapshots[snap_type].append(snap) 427 | # check if there are not too many snapshots taken 428 | for snap_type, snaps in snapshots.items(): 429 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 430 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 431 | if n+1 >= N_FREQUENT: 432 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 433 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 434 | if n+1 >= N_HOURLY: 435 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 436 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 437 | if n+1 >= N_DAILY: 438 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 439 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 440 | if n+1 >= N_WEEKLY: 441 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 442 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 443 | if n+1 >= N_MONTHLY: 444 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 445 | # check if after N_YEARLY runs there are N_YEARLY 'yearly' snapshots 446 | if n+1 >= N_YEARLY: 447 | assert len(snapshots['yearly']) == SNAPSHOTS_REF['yearly'] 448 | 449 | # check if filesystem is completely replicated on dest 450 | 
fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 451 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 452 | assert set(fs0_children) == set(fs1_children) 453 | 454 | 455 | def test_create_new(self, zpools, config_send): 456 | """Tests pyznap over 10 years and checks if newly created filesystems are correctly 457 | replicated""" 458 | 459 | fs0, fs1 = zpools 460 | fs0.destroy(force=True) 461 | fs1.destroy(force=True) 462 | 463 | # have to start at 1969 as faketime only goes from 1969 to 2068 464 | dates = [datetime(1969 + i, 1, 1) for i in range(10)] 465 | 466 | for n,date in enumerate(dates): 467 | # at every step create a new subfilesystem 468 | zfs.create('{:s}/sub{:d}'.format(fs0.name, n)) 469 | 470 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 471 | pyznap_take = faketime + ['pyznap', '--config', config_send, 'snap', '--take'] 472 | pyznap_clean = faketime + ['pyznap', '--config', config_send, 'snap', '--clean'] 473 | pyznap_send = faketime + ['pyznap', '--config', config_send, 'send'] 474 | 475 | # take, send & clean snaps every 1y 476 | _, _ = Popen(pyznap_take).communicate() 477 | _, _ = Popen(pyznap_send).communicate() 478 | _, _ = Popen(pyznap_clean).communicate() 479 | 480 | # get all snapshots on fs0 481 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 482 | for snap in fs0.snapshots(): 483 | snap_type = snap.name.split('_')[-1] 484 | snapshots[snap_type].append(snap) 485 | # check if there are not too many snapshots taken 486 | for snap_type, snaps in snapshots.items(): 487 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 488 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 489 | if n+1 >= N_FREQUENT: 490 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 491 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 492 | if n+1 
>= N_HOURLY: 493 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 494 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 495 | if n+1 >= N_DAILY: 496 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 497 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 498 | if n+1 >= N_WEEKLY: 499 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 500 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 501 | if n+1 >= N_MONTHLY: 502 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 503 | # check if after N_YEARLY runs there are N_YEARLY 'yearly' snapshots 504 | if n+1 >= N_YEARLY: 505 | assert len(snapshots['yearly']) == SNAPSHOTS_REF['yearly'] 506 | 507 | # check if filesystem is completely replicated on dest 508 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 509 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'])[1:]] 510 | assert set(fs0_children) == set(fs1_children) 511 | 512 | 513 | @pytest.mark.slow 514 | class TestSpecialCases(object): 515 | def test_winter_time(self, zpools, config): 516 | """Tests if pyznap does not crash when switching to winter time""" 517 | 518 | fs, _ = zpools 519 | fs.destroy(force=True) 520 | 521 | start_date = datetime(2018, 10, 28, 2, 0, 0) 522 | dates = [start_date + i * timedelta(minutes=15) for i in range(4)] 523 | 524 | for date in dates: 525 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 526 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 527 | 528 | # take snaps every 15min 529 | _, _ = Popen(pyznap_snap).communicate() 530 | 531 | start_date = datetime(2018, 10, 28, 2, 0, 0) 532 | dates = [start_date + i * timedelta(minutes=15) for i in range(8)] 533 | 534 | for date in dates: 535 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 536 | pyznap_snap = faketime + ['pyznap', '--config', config, 
'snap'] 537 | 538 | # take snaps every 15min 539 | _, _ = Popen(pyznap_snap).communicate() 540 | -------------------------------------------------------------------------------- /tests/test_pyznap_ssh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env pytest -v 2 | """ 3 | pyznap.test_pyznap_ssh 4 | ~~~~~~~~~~~~~~ 5 | 6 | Test pyznap over time (ssh). 7 | 8 | :copyright: (c) 2018-2019 by Yannick Boetzel. 9 | :license: GPLv3, see LICENSE for more details. 10 | """ 11 | 12 | import subprocess as sp 13 | import sys 14 | import os 15 | import random 16 | import string 17 | import logging 18 | from subprocess import Popen, PIPE 19 | from tempfile import NamedTemporaryFile 20 | from datetime import datetime, timedelta 21 | import pytest 22 | 23 | import pyznap.pyzfs as zfs 24 | from pyznap.utils import exists 25 | from test_utils import open_ssh 26 | from pyznap.ssh import SSH 27 | from pyznap.process import check_output, DatasetNotFoundError 28 | 29 | 30 | logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', 31 | datefmt='%b %d %H:%M:%S') 32 | logger = logging.getLogger(__name__) 33 | logging.getLogger("paramiko").setLevel(logging.ERROR) 34 | 35 | assert exists('faketime') 36 | 37 | def randomword(length): 38 | letters = string.ascii_lowercase 39 | return ''.join(random.choice(letters) for i in range(length)) 40 | 41 | # ssh connection to dest 42 | USER = 'root' 43 | HOST = '127.0.0.1' 44 | PORT = 22 45 | KEY = None 46 | 47 | ZPOOL = '/sbin/zpool' 48 | _word = randomword(8) 49 | POOL0 = 'pyznap_source_' + _word 50 | POOL1 = 'pyznap_dest_' + _word 51 | 52 | N_FREQUENT = 30 53 | N_HOURLY = 24 54 | N_DAILY = 14 55 | N_WEEKLY = 8 56 | N_MONTHLY = 12 57 | N_YEARLY = 3 58 | 59 | SNAPSHOTS_REF = {'frequent': N_FREQUENT, 'hourly': N_HOURLY, 'daily': N_DAILY, 'weekly': N_WEEKLY, 60 | 'monthly': N_MONTHLY, 'yearly': N_YEARLY} 61 | 62 | 63 | @pytest.fixture(scope='module') 64 | def zpools(): 65 
| """Creates two temporary zpools to be called from test functions, source is local and dest on 66 | remote ssh location. Yields the two pool names and destroys them after testing.""" 67 | 68 | sftp_filename = '/tmp/' + randomword(10) 69 | 70 | # ssh arguments for zfs functions 71 | ssh = SSH(USER, HOST, port=PORT, key=KEY) 72 | # need paramiko for sftp file 73 | sshclient = open_ssh(USER, HOST, port=PORT, key=KEY) 74 | sftp = sshclient.open_sftp() 75 | 76 | # Create temporary file on which the source zpool is created. Manually create sftp file 77 | with NamedTemporaryFile() as file0, sftp.open(sftp_filename, 'w') as file1: 78 | filename0 = file0.name 79 | filename1 = sftp_filename 80 | 81 | # Fix size to 100Mb 82 | file0.seek(100*1024**2-1) 83 | file0.write(b'0') 84 | file0.seek(0) 85 | file1.seek(100*1024**2-1) 86 | file1.write(b'0') 87 | file1.seek(0) 88 | 89 | # Create temporary test pools 90 | try: 91 | check_output([ZPOOL, 'create', POOL0, filename0]) 92 | except sp.CalledProcessError as err: 93 | logger.error(err) 94 | return 95 | 96 | try: 97 | check_output([ZPOOL, 'create', POOL1, filename1], ssh=ssh) 98 | except sp.CalledProcessError as err: 99 | logger.error(err) 100 | return 101 | 102 | try: 103 | fs0 = zfs.open(POOL0) 104 | fs1 = zfs.open(POOL1, ssh=ssh) 105 | assert fs0.name == POOL0 106 | assert fs1.name == POOL1 107 | except (DatasetNotFoundError, AssertionError, Exception) as err: 108 | logger.error(err) 109 | else: 110 | yield fs0, fs1 111 | 112 | # Destroy temporary test pools 113 | try: 114 | check_output([ZPOOL, 'destroy', POOL0]) 115 | except sp.CalledProcessError as err: 116 | logger.error(err) 117 | 118 | try: 119 | check_output([ZPOOL, 'destroy', POOL1], ssh=ssh) 120 | except sp.CalledProcessError as err: 121 | logger.error(err) 122 | 123 | # Delete tempfile on dest 124 | sftp.remove(sftp_filename) 125 | sftp.close() 126 | ssh.close() 127 | 128 | 129 | @pytest.fixture(scope='module') 130 | def config(): 131 | """Creates a temporary config 
file and yields its filename""" 132 | 133 | with NamedTemporaryFile('w') as file: 134 | file.write(f'[ssh:{PORT}:{USER}@{HOST}:{POOL1}]\n' 135 | f'frequent = {N_FREQUENT}\n' 136 | f'hourly = {N_HOURLY}\n' 137 | f'daily = {N_DAILY}\n' 138 | f'weekly = {N_WEEKLY}\n' 139 | f'monthly = {N_MONTHLY}\n' 140 | f'yearly = {N_YEARLY}\n' 141 | f'snap = yes\n' 142 | f'clean = yes\n\n') 143 | file.seek(0) 144 | yield file.name 145 | 146 | 147 | @pytest.fixture(scope='module') 148 | def config_send(): 149 | """Creates a temporary config file and yields its filename""" 150 | 151 | with NamedTemporaryFile('w') as file: 152 | file.write(f'[{POOL0}]\n' 153 | f'frequent = {N_FREQUENT}\n' 154 | f'hourly = {N_HOURLY}\n' 155 | f'daily = {N_DAILY}\n' 156 | f'weekly = {N_WEEKLY}\n' 157 | f'monthly = {N_MONTHLY}\n' 158 | f'yearly = {N_YEARLY}\n' 159 | f'snap = yes\n' 160 | f'clean = yes\n' 161 | f'dest = ssh:{PORT}:{USER}@{HOST}:{POOL1}\n' 162 | 163 | f'[ssh:{PORT}:{USER}@{HOST}:{POOL1}]\n' 164 | f'frequent = {N_FREQUENT}\n' 165 | f'hourly = {N_HOURLY}\n' 166 | f'daily = {N_DAILY}\n' 167 | f'weekly = {N_WEEKLY}\n' 168 | f'monthly = {N_MONTHLY}\n' 169 | f'yearly = {N_YEARLY}\n' 170 | f'clean = yes\n\n') 171 | file.seek(0) 172 | yield file.name 173 | 174 | 175 | @pytest.mark.slow 176 | class TestCycle(object): 177 | def test_2_hours(self, zpools, config): 178 | """Tests pyznap over 2 hours and checks if the correct amount of 'frequent' snapshots are taken""" 179 | 180 | _, fs = zpools 181 | fs.destroy(force=True) 182 | 183 | start_date = datetime(2014, 1, 1) 184 | dates = [start_date + i * timedelta(minutes=1) for i in range(60*2)] 185 | 186 | for n,date in enumerate(dates): 187 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 188 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 189 | 190 | # take snaps every 15min 191 | _, _ = Popen(pyznap_snap).communicate() 192 | 193 | # get all snapshots 194 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': 
[], 'monthly': [], 'yearly': []} 195 | for snap in fs.snapshots(): 196 | snap_type = snap.name.split('_')[-1] 197 | snapshots[snap_type].append(snap) 198 | # check if there are not too many snapshots taken 199 | for snap_type, snaps in snapshots.items(): 200 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 201 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 202 | if n+1 >= N_FREQUENT: 203 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 204 | 205 | 206 | def test_2_days(self, zpools, config): 207 | """Tests pyznap over 2 days and checks if the correct amount of 'frequent' snapshots are taken""" 208 | 209 | _, fs = zpools 210 | fs.destroy(force=True) 211 | 212 | start_date = datetime(2014, 1, 1) 213 | dates = [start_date + i * timedelta(minutes=15) for i in range(4*24*2)] 214 | 215 | for n,date in enumerate(dates): 216 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 217 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 218 | 219 | # take snaps every 15min 220 | _, _ = Popen(pyznap_snap).communicate() 221 | 222 | # get all snapshots 223 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 224 | for snap in fs.snapshots(): 225 | snap_type = snap.name.split('_')[-1] 226 | snapshots[snap_type].append(snap) 227 | # check if there are not too many snapshots taken 228 | for snap_type, snaps in snapshots.items(): 229 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 230 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 231 | if n+1 >= N_FREQUENT: 232 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 233 | 234 | 235 | def test_1_week(self, zpools, config): 236 | """Tests pyznap over 1 week and checks if the correct amount of 'frequent' & hourly' 237 | snapshots are taken""" 238 | 239 | _, fs = zpools 240 | fs.destroy(force=True) 241 | 242 | start_date = datetime(2014, 1, 1) 243 | dates = [start_date + i * 
timedelta(hours=1) for i in range(24*7)] 244 | 245 | for n,date in enumerate(dates): 246 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 247 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 248 | 249 | # take snaps every 1h 250 | _, _ = Popen(pyznap_snap).communicate() 251 | 252 | # get all snapshots 253 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 254 | for snap in fs.snapshots(): 255 | snap_type = snap.name.split('_')[-1] 256 | snapshots[snap_type].append(snap) 257 | # check if there are not too many snapshots taken 258 | for snap_type, snaps in snapshots.items(): 259 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 260 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 261 | if n+1 >= N_FREQUENT: 262 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 263 | # check if after N_HOURLY runs there are N_HOURLY 'hourly' snapshots 264 | if n+1 >= N_HOURLY: 265 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 266 | 267 | 268 | def test_8_weeks(self, zpools, config): 269 | """Tests pyznap over 8 weeks and checks if the correct amount of 'frequent', 'hourly' & 270 | 'daily' snapshots are taken""" 271 | 272 | _, fs = zpools 273 | fs.destroy(force=True) 274 | 275 | start_date = datetime(2014, 1, 1) 276 | dates = [start_date + i * timedelta(days=1) for i in range(7*8)] 277 | 278 | for n,date in enumerate(dates): 279 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 280 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 281 | 282 | # take snaps every 1d 283 | _, _ = Popen(pyznap_snap).communicate() 284 | 285 | # get all snapshots 286 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 287 | for snap in fs.snapshots(): 288 | snap_type = snap.name.split('_')[-1] 289 | snapshots[snap_type].append(snap) 290 | # check if there are not too many snapshots taken 291 | for 
snap_type, snaps in snapshots.items(): 292 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 293 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 294 | if n+1 >= N_FREQUENT: 295 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 296 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 297 | if n+1 >= N_HOURLY: 298 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 299 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 300 | if n+1 >= N_DAILY: 301 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 302 | 303 | 304 | def test_6_months(self, zpools, config): 305 | """Tests pyznap over 6 months and checks if the correct amount of 'frequent', 'hourly', 306 | 'daily' & 'weekly' snapshots are taken""" 307 | 308 | _, fs = zpools 309 | fs.destroy(force=True) 310 | 311 | start_date = datetime(2014, 1, 1) 312 | dates = [start_date + i * timedelta(days=7) for i in range(4*6)] 313 | 314 | for n,date in enumerate(dates): 315 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 316 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 317 | 318 | # take snaps every 7d 319 | _, _ = Popen(pyznap_snap).communicate() 320 | 321 | # get all snapshots 322 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 323 | for snap in fs.snapshots(): 324 | snap_type = snap.name.split('_')[-1] 325 | snapshots[snap_type].append(snap) 326 | # check if there are not too many snapshots taken 327 | for snap_type, snaps in snapshots.items(): 328 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 329 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 330 | if n+1 >= N_FREQUENT: 331 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 332 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 333 | if n+1 >= N_HOURLY: 334 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 335 | # check 
if after N_DAILY runs there are N_DAILY 'daily' snapshots 336 | if n+1 >= N_DAILY: 337 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 338 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 339 | if n+1 >= N_WEEKLY: 340 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 341 | 342 | 343 | def test_3_years(self, zpools, config): 344 | """Tests pyznap over 3 years and checks if the correct amount of 'frequent', 'hourly', 345 | 'daily', 'weekly' & 'monthly' snapshots are taken""" 346 | 347 | _, fs = zpools 348 | fs.destroy(force=True) 349 | 350 | start_date = datetime(2014, 1, 1) 351 | dates = [start_date + i * timedelta(days=31) for i in range(12*3)] 352 | 353 | for n,date in enumerate(dates): 354 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 355 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 356 | 357 | # take snaps every 31d 358 | _, _ = Popen(pyznap_snap).communicate() 359 | 360 | # get all snapshots 361 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 362 | for snap in fs.snapshots(): 363 | snap_type = snap.name.split('_')[-1] 364 | snapshots[snap_type].append(snap) 365 | # check if there are not too many snapshots taken 366 | for snap_type, snaps in snapshots.items(): 367 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 368 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 369 | if n+1 >= N_FREQUENT: 370 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 371 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 372 | if n+1 >= N_HOURLY: 373 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 374 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 375 | if n+1 >= N_DAILY: 376 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 377 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 378 | if n+1 >= N_WEEKLY: 379 | assert 
len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 380 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 381 | if n+1 >= N_MONTHLY: 382 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 383 | 384 | 385 | def test_50_years(self, zpools, config): 386 | """Tests pyznap over 50 years and checks if the correct amount of 'frequent', 'hourly', 387 | 'daily', 'weekly', 'monthly' & 'yearly' snapshots are taken""" 388 | 389 | _, fs = zpools 390 | fs.destroy(force=True) 391 | 392 | # have to start at 1971 as faketime only goes from 1969 to 2068 and ssh does not like 393 | # dates before 1971 394 | dates = [datetime(1971 + i, 1, 1) for i in range(50)] 395 | 396 | for n,date in enumerate(dates): 397 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 398 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 399 | 400 | # take snaps every 1y 401 | _, _ = Popen(pyznap_snap).communicate() 402 | 403 | # get all snapshots 404 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 405 | for snap in fs.snapshots(): 406 | snap_type = snap.name.split('_')[-1] 407 | snapshots[snap_type].append(snap) 408 | # check if there are not too many snapshots taken 409 | for snap_type, snaps in snapshots.items(): 410 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 411 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 412 | if n+1 >= N_FREQUENT: 413 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 414 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 415 | if n+1 >= N_HOURLY: 416 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 417 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 418 | if n+1 >= N_DAILY: 419 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 420 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 421 | if n+1 >= N_WEEKLY: 422 | assert len(snapshots['weekly']) 
== SNAPSHOTS_REF['weekly'] 423 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 424 | if n+1 >= N_MONTHLY: 425 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 426 | # check if after N_YEARLY runs there are N_YEARLY 'yearly' snapshots 427 | if n+1 >= N_YEARLY: 428 | assert len(snapshots['yearly']) == SNAPSHOTS_REF['yearly'] 429 | 430 | 431 | @pytest.mark.slow 432 | class TestSend(object): 433 | def test_50_years(self, zpools, config_send): 434 | """Tests pyznap over 50 years and checks if snapshots are sent correctly""" 435 | 436 | fs0, fs1 = zpools 437 | ssh = fs1.ssh 438 | fs0.destroy(force=True) 439 | fs1.destroy(force=True) 440 | zfs.create('{:s}/sub1'.format(fs0.name)) 441 | 442 | # have to start at 1971 as faketime only goes from 1969 to 2068 and ssh does not like 443 | # dates before 1971 444 | dates = [datetime(1971 + i, 1, 1) for i in range(50)] 445 | 446 | for n,date in enumerate(dates): 447 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 448 | pyznap_take = faketime + ['pyznap', '--config', config_send, 'snap', '--take'] 449 | pyznap_clean = faketime + ['pyznap', '--config', config_send, 'snap', '--clean'] 450 | pyznap_send = faketime + ['pyznap', '--config', config_send, 'send'] 451 | 452 | # take, send & clean snaps every 1y 453 | _, _ = Popen(pyznap_take).communicate() 454 | _, _ = Popen(pyznap_send).communicate() 455 | _, _ = Popen(pyznap_clean).communicate() 456 | 457 | # get all snapshots on fs0 458 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 459 | for snap in fs0.snapshots(): 460 | snap_type = snap.name.split('_')[-1] 461 | snapshots[snap_type].append(snap) 462 | # check if there are not too many snapshots taken 463 | for snap_type, snaps in snapshots.items(): 464 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 465 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 466 | if n+1 >= N_FREQUENT: 467 | assert 
len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 468 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 469 | if n+1 >= N_HOURLY: 470 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 471 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 472 | if n+1 >= N_DAILY: 473 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 474 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 475 | if n+1 >= N_WEEKLY: 476 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 477 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 478 | if n+1 >= N_MONTHLY: 479 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 480 | # check if after N_YEARLY runs there are N_YEARLY 'yearly' snapshots 481 | if n+1 >= N_YEARLY: 482 | assert len(snapshots['yearly']) == SNAPSHOTS_REF['yearly'] 483 | 484 | # check if filesystem is completely replicated on dest 485 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 486 | fs1_children = [child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 487 | assert set(fs0_children) == set(fs1_children) 488 | 489 | 490 | def test_create_new(self, zpools, config_send): 491 | """Tests pyznap over 10 years and checks if newly created filesystems are correctly 492 | replicated""" 493 | 494 | fs0, fs1 = zpools 495 | ssh = fs1.ssh 496 | fs0.destroy(force=True) 497 | fs1.destroy(force=True) 498 | 499 | # have to start at 1971 as faketime only goes from 1969 to 2068 and ssh does not like 500 | # dates before 1971 501 | dates = [datetime(1971 + i, 1, 1) for i in range(10)] 502 | 503 | for n,date in enumerate(dates): 504 | # at every step create a new subfilesystem 505 | zfs.create('{:s}/sub{:d}'.format(fs0.name, n)) 506 | 507 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 508 | pyznap_take = faketime + ['pyznap', '--config', config_send, 'snap', 
'--take'] 509 | pyznap_clean = faketime + ['pyznap', '--config', config_send, 'snap', '--clean'] 510 | pyznap_send = faketime + ['pyznap', '--config', config_send, 'send'] 511 | 512 | # take, send & clean snaps every 1y 513 | _, _ = Popen(pyznap_take).communicate() 514 | _, _ = Popen(pyznap_send).communicate() 515 | _, _ = Popen(pyznap_clean).communicate() 516 | 517 | # get all snapshots on fs0 518 | snapshots = {'frequent': [], 'hourly': [], 'daily': [], 'weekly': [], 'monthly': [], 'yearly': []} 519 | for snap in fs0.snapshots(): 520 | snap_type = snap.name.split('_')[-1] 521 | snapshots[snap_type].append(snap) 522 | # check if there are not too many snapshots taken 523 | for snap_type, snaps in snapshots.items(): 524 | assert len(snaps) <= SNAPSHOTS_REF[snap_type] 525 | # check if after N_FREQUENT runs there are N_FREQUENT 'frequent' snapshots 526 | if n+1 >= N_FREQUENT: 527 | assert len(snapshots['frequent']) == SNAPSHOTS_REF['frequent'] 528 | # check if after N-HOURLY runs there are N-HOURLY 'hourly' snapshots 529 | if n+1 >= N_HOURLY: 530 | assert len(snapshots['hourly']) == SNAPSHOTS_REF['hourly'] 531 | # check if after N_DAILY runs there are N_DAILY 'daily' snapshots 532 | if n+1 >= N_DAILY: 533 | assert len(snapshots['daily']) == SNAPSHOTS_REF['daily'] 534 | # check if after N_WEEKLY runs there are N_WEEKLY 'weekly' snapshots 535 | if n+1 >= N_WEEKLY: 536 | assert len(snapshots['weekly']) == SNAPSHOTS_REF['weekly'] 537 | # check if after N_MONTHLY runs there are N_MONTHLY 'monthly' snapshots 538 | if n+1 >= N_MONTHLY: 539 | assert len(snapshots['monthly']) == SNAPSHOTS_REF['monthly'] 540 | # check if after N_YEARLY runs there are N_YEARLY 'yearly' snapshots 541 | if n+1 >= N_YEARLY: 542 | assert len(snapshots['yearly']) == SNAPSHOTS_REF['yearly'] 543 | 544 | # check if filesystem is completely replicated on dest 545 | fs0_children = [child.name.replace(fs0.name, '') for child in zfs.find(fs0.name, types=['all'])[1:]] 546 | fs1_children = 
[child.name.replace(fs1.name, '') for child in zfs.find(fs1.name, types=['all'], ssh=ssh)[1:]] 547 | assert set(fs0_children) == set(fs1_children) 548 | 549 | 550 | @pytest.mark.slow 551 | class TestSpecialCases(object): 552 | def test_winter_time(self, zpools, config): 553 | """Tests if pyznap does not crash when switching to winter time""" 554 | 555 | _, fs = zpools 556 | fs.destroy(force=True) 557 | 558 | start_date = datetime(2018, 10, 28, 2, 0, 0) 559 | dates = [start_date + i * timedelta(minutes=15) for i in range(4)] 560 | 561 | for date in dates: 562 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 563 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 564 | 565 | # take snaps every 15min 566 | _, _ = Popen(pyznap_snap).communicate() 567 | 568 | start_date = datetime(2018, 10, 28, 2, 0, 0) 569 | dates = [start_date + i * timedelta(minutes=15) for i in range(8)] 570 | 571 | for date in dates: 572 | faketime = ['faketime', date.strftime('%y-%m-%d %H:%M:%S')] 573 | pyznap_snap = faketime + ['pyznap', '--config', config, 'snap'] 574 | 575 | # take snaps every 15min 576 | _, _ = Popen(pyznap_snap).communicate() 577 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | pyznap.tests.test_utils 3 | ~~~~~~~~~~~~~~ 4 | 5 | Helper functions for tests. 6 | 7 | :copyright: (c) 2018-2019 by Yannick Boetzel. 8 | :license: GPLv3, see LICENSE for more details. 
9 | """ 10 | 11 | 12 | import os 13 | import logging 14 | 15 | import paramiko as pm 16 | from socket import timeout, gaierror 17 | from paramiko.ssh_exception import (AuthenticationException, BadAuthenticationType, 18 | BadHostKeyException, ChannelException, NoValidConnectionsError, 19 | PasswordRequiredException, SSHException, PartialAuthentication, 20 | ProxyCommandFailure) 21 | 22 | 23 | def open_ssh(user, host, key=None, port=22): 24 | """Opens an ssh connection to host. 25 | 26 | Parameters: 27 | ---------- 28 | user : {str} 29 | Username to use 30 | host : {str} 31 | Host to connect to 32 | key : {str}, optional 33 | Path to ssh keyfile (the default is None, meaning the standard location 34 | '~/.ssh/id_rsa' will be checked) 35 | port : {int}, optional 36 | Port number to connect to (the default is 22) 37 | 38 | Raises 39 | ------ 40 | FileNotFoundError 41 | If keyfile does not exist 42 | SSHException 43 | General exception raised if anything goes wrong during ssh connection 44 | 45 | Returns 46 | ------- 47 | paramiko.SSHClient 48 | Open ssh connection. 
49 | """ 50 | 51 | logger = logging.getLogger(__name__) 52 | 53 | if not key: 54 | key = os.path.expanduser('~/.ssh/id_rsa') 55 | if not os.path.isfile(key): 56 | logger.error('{} is not a valid ssh key file...'.format(key)) 57 | raise FileNotFoundError(key) 58 | 59 | ssh = pm.SSHClient() 60 | # Append username & hostname attributes to ssh class 61 | ssh.user, ssh.host = user, host 62 | try: 63 | ssh.load_system_host_keys(os.path.expanduser('~/.ssh/known_hosts')) 64 | except (IOError, FileNotFoundError): 65 | ssh.load_system_host_keys() 66 | ssh.set_missing_host_key_policy(pm.WarningPolicy()) 67 | 68 | try: 69 | ssh.connect(hostname=host, port=port, username=user, key_filename=key, timeout=5, 70 | look_for_keys=False) 71 | # Test connection 72 | ssh.exec_command('ls', timeout=5) 73 | except (AuthenticationException, BadAuthenticationType, 74 | BadHostKeyException, ChannelException, NoValidConnectionsError, 75 | PasswordRequiredException, SSHException, PartialAuthentication, 76 | ProxyCommandFailure, timeout, gaierror) as err: 77 | logger.error('Could not connect to host {:s}: {}...'.format(host, err)) 78 | # Raise general exception to be catched outside 79 | raise SSHException(err) 80 | 81 | return ssh --------------------------------------------------------------------------------