├── .github └── workflows │ ├── snapshot.yml │ └── superlinter.yml ├── .gitignore ├── LICENSE ├── README.md ├── snapbtrex.py └── testsnap.sh /.github/workflows/snapshot.yml: -------------------------------------------------------------------------------- 1 | name: snapbtrex Test 2 | 3 | # Run this workflow every time a new commit pushed to your repository 4 | on: push 5 | 6 | jobs: 7 | # Set the job key. The key is displayed as the job name 8 | # when a job name is not provided 9 | snap-test: 10 | # Name the Job 11 | name: Test Snapshoting and local transfers 12 | # Set the type of machine to run on 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | # Checks out a copy of your repository on the ubuntu-latest machine 17 | - name: Checkout code 18 | uses: actions/checkout@v2 19 | 20 | - name: Install pv dependency 21 | run: sudo apt-get install pv 22 | 23 | - name: Setup SSH 24 | run: | 25 | ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/id_rsa 26 | cat ~/.ssh/id_rsa.pub | tee -a ~/.ssh/authorized_keys 27 | chmod 600 ~/.ssh/authorized_keys 28 | chmod 700 ~/.ssh 29 | sudo chmod -c 0755 ~/ 30 | 31 | - name: Test SSH connection to localhost 32 | run: ssh -vv -i ~/.ssh/id_rsa -o BatchMode=yes -o StrictHostKeyChecking=no $(whoami)@localhost 33 | 34 | - name: Check if help is displayed 35 | run: ./snapbtrex.py --help 36 | 37 | - name: Check if safety net works is displayed 38 | run: ./snapbtrex.py 39 | 40 | - name: check if explanation is given 41 | run: ./snapbtrex.py --explain 42 | 43 | - name: run few internal tests 44 | run: | 45 | ./snapbtrex.py --test --path . --verbose --target-freespace 1 -S --keep-backups=3 46 | ./snapbtrex.py --test --path . --verbose --target-freespace 50T -S --keep-backups=3 47 | ./snapbtrex.py --test --path . --verbose --max-age 20w --target-backups 4 -S --keep-backups=3 48 | ./snapbtrex.py --test --path . 
--verbose --max-age 20y --target-backups 2 -S --keep-backups=3 49 | 50 | - name: Run snapshot test 51 | run: sudo ./testsnap.sh 52 | -------------------------------------------------------------------------------- /.github/workflows/superlinter.yml: -------------------------------------------------------------------------------- 1 | name: Super-Linter 2 | 3 | # Run this workflow every time a new commit pushed to your repository 4 | on: push 5 | 6 | jobs: 7 | # Set the job key. The key is displayed as the job name 8 | # when a job name is not provided 9 | super-lint: 10 | # Name the Job 11 | name: Lint code base 12 | # Set the type of machine to run on 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | # Checks out a copy of your repository on the ubuntu-latest machine 17 | - name: Checkout code 18 | uses: actions/checkout@v2 19 | 20 | # Runs the Super-Linter action 21 | - name: Run Super-Linter 22 | uses: github/super-linter@v3 23 | env: 24 | DEFAULT_BRANCH: main 25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | venv 2 | .idea 3 | 4 | btrfs.test.local 5 | local.test.img -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, yoshtec 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 
9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # snapbtrex 2 | 3 | snapbtrex is a small utility that keeps snapshots of btrfs filesystems and optionally send it to a remote system. 4 | 5 | The script came originally from . This is an extended version which is 6 | capable of transferring snapshots to remote systems. 7 | 8 | You can run it regularly (for example in a small script in cron.hourly or in crontab), or once in a while, to maintain 9 | an "interesting" (see below) set of snapshots (backups). You may manually add or remove snapshots as you like, 10 | use `snapbtrex.DATE_FORMAT` (in GMT) as snapshot-name. 11 | 12 | It will keep at most `--target-backups` snapshots and ensure that 13 | `--target-freespace` is available on the file-system by selecting snapshots to remove. 
14 | 15 | Using `--keep-backups`, you can ensure that at least some backups are kept, even if `--target-freespace` cannot be 16 | satisfied. 17 | 18 | snapbtrex will keep backups with exponentially increasing distance as you go back in time. It does this by selecting 19 | snapshots to remove as follows. 20 | 21 | The snapshots to remove is selected by "scoring" each space between snapshots, (newer, older). snapbtrex will remove the 22 | older of the two snapshots in the space that have the lowest score. 23 | 24 | The scoring mechanism integrates e^x from (now-newer) to (now-older) 25 | so, new pairs will have high value, even if they are tightly packed, while older pairs will have high value if they are 26 | far apart. 27 | 28 | Alternatively you can also keep only the latest snapshots via `--keep-only-latest` 29 | or set a maximum age for your snapshots with the `--max-age` parameter. 30 | 31 | ## Transferring Snapshots to Remote Host 32 | 33 | snapbtrex uses the btrfs send and receive commands to transfer snapshots from a sending host to a receiving host via 34 | ssh. Using `--ssh-port`, you can specify the port on which such ssh connections will be attempted. 35 | 36 | Both hosts have to be prepared as in the setup instructions if you want to call the script via cronjob. You can always 37 | call snapbtrex as standalone script if you have appropriate rights. 38 | 39 | Specify your target host via `--remote-host` and the directory with the `--remote-dir` options. Both options have to be 40 | present. The target directory has to be located within a btrfs file system, and it has to be mounted via the root 41 | volume, or else btrfs might fail to receive snapshots. 42 | 43 | ### Setup instructions 44 | 45 | For transfer backups with ssh within an automated script (cronjob) you have to prepare the systems with the following 46 | steps. 47 | 48 | 1\. create user `snapbtr` on both systems 49 | 50 | ```sh 51 | sudo adduser snapbtr 52 | ``` 53 | 54 | 2\. 
generate ssh key on the sender and copy public key to receiving machine 55 | 56 | ```sh 57 | su - snapbtr 58 | ssh-keygen 59 | ssh-copy-id snapbtr@123.45.56.78 60 | ``` 61 | 62 | 3\. create a sudoers include file at the receiving machine (use `sudo visudo`) 63 | 64 | File: `/etc/sudoers.d/90_snapbtrrcv` 65 | 66 | Minimum content is this for receiving snapshots on a remote system: 67 | 68 | ```sh 69 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs receive* 70 | ``` 71 | 72 | If you want to link the latest transferred snapshot remotely with `--remote-link` 73 | then you will need another line (adopt path to your specific path): 74 | 75 | ```sh 76 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/ln -sfn /path/to/backups/* /path/to/current/current-link 77 | ``` 78 | 79 | If you want remote pruning of snapshots via `--remote-keep` option, then add this: 80 | 81 | ```sh 82 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs subvolume delete* 83 | ``` 84 | 85 | 4\. Create a sudoers include file on the sending machine 86 | 87 | File: `/etc/sudoers.d/90_snapbtrsnd` 88 | 89 | Contents: 90 | 91 | ```sh 92 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs send* 93 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs subvolume* 94 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs filesystem sync* 95 | ``` 96 | 97 | Hint 1: For a more secure setup you should include the specific paths at the sudoers files. 98 | 99 | Hint 2: On some Linux flavors you might find the btrfs tools in `/sbin/btrfs` 100 | opposed to `/bin/btrfs`, the sudoers files have to reflect that. Try using 101 | `which btrfs` to find out the full path to your `btrfs`. 
102 | 103 | ## Examples 104 | 105 | ### Shell 106 | 107 | Snapshot a volume and keep 20 versions: 108 | 109 | ```sh 110 | sudo snapbtrex.py --snap /mnt/btrfs/@subvol1/ --path /mnt/btrfs/.mysnapshots/subvol1/ --target-backups 20 111 | ``` 112 | 113 | Snapshot a volume and copy the snapshot to different device 114 | 115 | ```sh 116 | sudo snapbtrex.py --snap /mnt/btrfs/@subvol1/ --path /mnt/btrfs/.mysnapshots/subvol1/ --target-backups 20 --sync /mnt/btrfs_archive/backups 117 | ``` 118 | 119 | ### Crontab 120 | 121 | Snapshot and transfer to remote host every day at 4:10 am, keep 52 snapshots on the origin host (keeps all remote 122 | backups, unless you delete them manually) 123 | 124 | ```sh 125 | 10 4 * * * snapbtr /opt/snapbtrex/snapbtrex.py --snap /mnt/btrfs/@subvol1/ --path /mnt/btrfs/.mysnapshots/subvol1/ --target-backups 52 --verbose --remote-host 123.45.56.78 --remote-dir /mnt/btrfs/.backup/subvol1/ >> /var/log/snapbtrex.log 126 | ``` 127 | 128 | Snapshot and transfer to remote host every day at 4:20 am, keep 10 snapshots on the origin host and keep only 50 129 | snapshots on the remote host. 130 | 131 | ```sh 132 | 20 4 * * * snapbtr /opt/snapbtrex/snapbtrex.py --snap /mnt/btrfs/@subvol2/ --path /mnt/btrfs/.mysnapshots/subvol2/ --target-backups 10 --verbose --remote-host 123.45.56.78 --remote-dir /mnt/btrfs/.backup/subvol2/ --remote-keep 50 >> /var/log/snapbtrex.log 133 | ``` 134 | 135 | ## Migrating from SnapBtr 136 | 137 | If you created snapshots with [snapbtr](https://btrfs.wiki.kernel.org/index.php/SnapBtr) 138 | then those snapshots were created as read/write snapshots. The sending of snapshots to remote hosts demands that those 139 | snaps are read only. You can change rw snaps to ro snaps in the directory of the snapshots via: 140 | 141 | ```sh 142 | sudo find . 
-maxdepth 1 -type d -exec btrfs property set -t s {} ro true \; 143 | ``` -------------------------------------------------------------------------------- /snapbtrex.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Author: Helge Jensen 5 | # Author: Jonas von Malottki (yt) 6 | # 7 | # Version history: 8 | # 9 | # 20150831 1.1 (yt) 10 | # * made snapshots default to readonly 11 | # * added EXEC as Keyword to find out on verbose what is actually executed 12 | # 13 | # 20160515 1.2 (yt) 14 | # * remote linking to latest transferred snapshot 15 | # * logging improvements 16 | # 17 | # 20160516 1.3 (yt) 18 | # * remote deleting of snapshots 19 | # 20 | # 20160527 1.4 (yt) 21 | # * Allowing just taking a snapshot without cleanup 22 | # 23 | # 20171202 1.5 (yt) 24 | # * Local syncing of snapshots 25 | # * Dry run mode 26 | # 27 | # 20171223 1.6 (yt) 28 | # * Error handling 29 | # 30 | # 20180419 1.7 (yt) 31 | # * Added --keep-only-latest modifier 32 | # * --dry-run should not exit on deletion anymore 33 | # 34 | # 20191124 1.8 (yt) 35 | # * fixed --sync-keep 36 | # 37 | # 38 | # IDEA: change to different time format for integration with samba vfs 39 | # https://www.samba.org/samba/docs/man/manpages/vfs_shadow_copy2.8.html 40 | 41 | """ 42 | snapbtrex is a small utility that keeps snapshots of btrfs filesystems 43 | and optionally send it to a remote system. 44 | 45 | snapbtrex is hosted on github: 46 | https://github.com/yoshtec/snapbtrex 47 | 48 | You can run it regularly (for example in a small script in 49 | cron.hourly), or once in a while, to maintain an "interesting" (see 50 | below) set of snapshots (backups). You may manually add or remove 51 | snapshots as you like, use 'snapbtrex.DATE_FORMAT' (in GMT) as 52 | snapshot-name. 
53 | 54 | It will keep at most --target-backups snapshots and ensure that 55 | --target-freespace is available on the file-system by selecting 56 | snapshots to remove. 57 | 58 | Using --keep-backups, you can ensure that at least some backups are 59 | kept, even if --target-freespace cannot be satisfied. 60 | 61 | snapbtrex will keep backups with exponentially increasing distance as 62 | you go back in time. It does this by selecting snapshots to remove as 63 | follows. 64 | 65 | The snapshots to remove is selected by "scoring" each space between 66 | snapshots, (newer,older). snapbtrex will remove the older of the two 67 | snapshots in the space that have the lowest score. 68 | 69 | The scoring mechanism integrates e^x from (now-newer) to (now-older) 70 | so, new pairs will have high value, even if they are tightly packed, 71 | while older pairs will have high value if they are far apart. 72 | 73 | The mechanism is completely self-contained and you can delete any 74 | snapshot manually or any files in the snapshots. 75 | 76 | 77 | == Transferring Snapshots to Remote Host 78 | 79 | snapbtrex uses the btrfs send and receive commands to transfer 80 | snapshots from a sending host to a receiving host. 81 | 82 | Both hosts have to be prepared as in the setup instructions if 83 | you want to call the script via cronjob. 84 | 85 | == Setup instructions 86 | transfer with backups with ssh 87 | 88 | 1. create user snapbtr on both systems 89 | -- 90 | sudo adduser snapbtr 91 | -- 92 | 93 | 2. generate ssh key on snd put public into rcv 94 | -- 95 | ssh-keygen 96 | ssh-copy-id snapbtr@123.45.56.78 97 | -- 98 | 99 | 3. create a sudoers file at the receiving machine 100 | File: /etc/sudoers.d/90_snapbtrrcv 101 | 102 | Precaution: depending on your distribution the path for btrfs tools might differ! 
103 | 104 | Minimum content is this for receiving snapshots on a remote system: 105 | -- 106 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs receive* 107 | -- 108 | 109 | If you want to link the latest transferred item remotely to path then you'll 110 | need another line (adopt path to your specific path): 111 | 112 | -- 113 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/ln -sfn /path/to/backups/* /path/to/current/current-link 114 | -- 115 | 116 | If you need remote pruning then add this (you can also add the path for more secure setup): 117 | -- 118 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs subvolume delete* 119 | -- 120 | 121 | 122 | 4. Create a sudoers include file on the sending machine 123 | 124 | File: /etc/sudoers.d/90_snapbtrsnd 125 | 126 | Precaution: depending on your distribution the path for btrfs tools might differ! 127 | 128 | Contents: 129 | -- 130 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs send* 131 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs filesystem sync* 132 | snapbtr ALL=(root:nobody) NOPASSWD:NOEXEC: /bin/btrfs subvolume* 133 | -- 134 | 135 | """ 136 | 137 | import itertools 138 | import math 139 | import os 140 | import os.path 141 | import sys 142 | import time 143 | 144 | DATE_FORMAT = "%Y%m%d-%H%M%S" # date format used for directories to clean 145 | 146 | DEFAULT_KEEP_BACKUPS = 10 147 | 148 | LOG_LOCAL = "Local > " 149 | LOG_REMOTE = "Remote > " 150 | LOG_EXEC = "EXEC >-> " 151 | LOG_STDERR = "STDERR > " 152 | LOG_OUTPUT = "OUTPUT > " 153 | 154 | # find TIME_SCALE: t < 2**32 => e**(t/c) < 2**32 155 | TIME_SCALE = math.ceil(float((2 ** 32) / math.log(2 ** 32))) 156 | 157 | 158 | def timef(x): 159 | # make value inverse exponential in the time passed 160 | try: 161 | v = math.exp(_timestamp(x) / TIME_SCALE) 162 | except ZeroDivisionError: 163 | v = None 164 | return v 165 | 166 | 167 | def timestamp(x): 168 | try: 169 | v = _timestamp(x) 170 | except ValueError: 171 | v = None 172 | return v 
def _timestamp(x):
    # Parse the basename of 'x' as a DATE_FORMAT timestamp (local time).
    # Raises ValueError when the name is not a snapshot-style name.
    return time.mktime(time.strptime(os.path.split(x)[1], DATE_FORMAT))


def sorted_age(dirs, max_age):
    """Yield entries of 'dirs' older than the cutoff 'max_age', oldest first.

    NOTE(review): 'max_age' is compared against parsed POSIX timestamps, so it
    is assumed to be an absolute cutoff timestamp, not a duration — confirm
    against the caller that computes it.
    """
    for xv, x in sorted((timestamp(y), y) for y in dirs):
        if xv < max_age:
            yield x


def first(it):
    """Return the first element of 'it', or None when it is empty."""
    for x in it:
        return x


def sorted_value(dirs):
    """Iterate 'dirs' in deletion order (least valuable snapshot first)."""
    if len(dirs) <= 0:
        # empty input: nothing to score, return it unchanged
        return dirs
    else:
        return _sorted_value(dirs)


def _sorted_value(dirs):
    # Iterate dirs, sorted by their relative value when deleted
    def poles(items):
        # Yield (items[0], items[1]), (items[1], items[2]), ... (items[n-1], items[n])
        rest = iter(items)
        last = next(rest)
        for n in rest:
            yield last, n
            last = n

    def all_but_last(items):
        # Yield items[0], ..., items[n-1]
        rest = iter(items)
        last = next(rest)
        for x in rest:
            yield last
            last = x

    # Remaining candidates for yield,
    # except the "max" one (latest)
    candidates = dict(
        all_but_last((x, xf) for xf, x in sorted((timef(y), y) for y in dirs) if xf)
    )
    # Keep going as long as there is anything to remove
    while len(candidates) > 1:
        # Get candidates ordered by timestamp (as v is monotonic in timestamp)
        remain = sorted((v, k) for k, v in candidates.items())
        # Find the "amount of information we loose by deleting the
        # latest of the pair"
        diffs = list(
            (to_tf - frm_tf, frm, to) for ((frm_tf, frm), (to_tf, to)) in poles(remain)
        )
        # Select the least important one
        mdiff, mfrm, mto = min(diffs)

        del candidates[mto]  # That's not a candidate any longer, it's gonna go
        yield mto

    # also, we must delete the last entry
    yield next(iter(candidates.keys()))


class Operations:
    """Real btrfs operations on the snapshot directory 'path'.

    All external commands run through check_call(); 'trace' is an optional
    logging callable (e.g. log_trace).
    """

    def __init__(self, path, trace=None):
        self.tracef = trace
        self.path = path

    def check_call(self, args, shell=False, dry_safe=False):
        """Run a command, trace its stderr/stdout and return decoded stdout.

        'dry_safe' is unused here; DryOperations overrides check_call and uses
        it to mark read-only commands that may run even in dry-run mode.
        Raises RuntimeError when the command exits non-zero.
        """
        import subprocess

        cmd_str = " ".join(args)
        self.trace(LOG_EXEC + cmd_str)
        p = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell
        )
        stdout, stderr = p.communicate()

        if stderr:
            # sys.std*.encoding is None when the stream is replaced or
            # detached; fall back to utf-8 instead of raising TypeError.
            stderr = stderr.decode(
                encoding=sys.stderr.encoding or "utf-8", errors="ignore"
            )
            self.trace(LOG_STDERR + stderr)

        if stdout:
            stdout = stdout.decode(
                encoding=sys.stdout.encoding or "utf-8", errors="ignore"
            )
            self.trace(LOG_OUTPUT + stdout)

        if p.returncode != 0:
            raise RuntimeError(f"failed {cmd_str}")
        return stdout  # return the content

    def sync(self, dir):
        # syncing to be sure the operation is on the disc
        self.trace(LOG_LOCAL + f"sync filesystem {dir}")
        args = ["sudo", "btrfs", "filesystem", "sync", dir]
        self.check_call(args)
        self.trace(LOG_LOCAL + f"done sync filesystem {dir}")

    def unsnap(self, dir):
        # Delete a snapshot given relative to self.path.
        self.unsnapx(os.path.join(self.path, dir))

    def unsnapx(self, dir):
        # Delete a snapshot given as an absolute/explicit path.
        self.trace(LOG_LOCAL + f"remove snapshot {dir}")
        args = ["sudo", "btrfs", "subvolume", "delete", dir]
        self.check_call(args)
        self.trace(LOG_LOCAL + f"done remove snapshot {dir}")

    def freespace(self):
        """Return free bytes available on the snapshot filesystem."""
        # sync filesystem before assessing the free space
        self.sync(self.path)
        st = os.statvfs(self.path)
        self.trace(
            LOG_LOCAL + f"filesystem info: {st}"
        )  # https://www.spinics.net/lists/linux-btrfs/msg103660.html
        return st.f_bavail * st.f_bsize

    def listdir(self):
        # Only entries whose name parses as DATE_FORMAT count as snapshots.
        return [d for d in os.listdir(self.path) if timef(d)]

    def listdir_path(self, target_path):
        return [d for d in os.listdir(target_path) if timef(d)]

    def listremote_dir(self, receiver, receiver_path, ssh_port):
        """List snapshot names present on the remote host via ssh."""
        self.trace(
            LOG_REMOTE + f"list remote files host={receiver}, dir={receiver_path}"
        )
        args = ["ssh", "-p", ssh_port, receiver, "ls -1 " + receiver_path]
        return [
            d for d in self.check_call(args, dry_safe=True).splitlines() if timef(d)
        ]

    def snap(self, path):
        """Create a read-only snapshot of 'path' and return its directory."""
        # yt: changed to readonly snapshots
        newdir = os.path.join(self.path, self.datestamp())
        self.trace(LOG_LOCAL + f"snapshotting path={path} to newdir={newdir}")
        args = ["sudo", "btrfs", "subvolume", "snapshot", "-r", path, newdir]
        self.check_call(args)
        self.sync(self.path)  # yt: make sure the new snap is on the disk
        self.trace(LOG_LOCAL + "done snapshotting")
        return newdir  # yt: return the latest snapshot

    @staticmethod
    def datestamp(secs=None):
        # GMT timestamp used as the snapshot directory name.
        return time.strftime(DATE_FORMAT, time.gmtime(secs))

    def trace(self, *args, **kwargs):
        # Forward to the configured trace callable, if any.
        f = self.tracef
        if f:
            f(*args, **kwargs)

    def send_single(self, snap, receiver, receiver_path, ssh_port, rate_limit):
        """Send one full (non-incremental) snapshot to the remote host."""
        self.trace(
            LOG_REMOTE
            + f"send single snapshot from {snap} to host {receiver} path={receiver_path}"
        )
        args = [
            f"sudo btrfs send -v {os.path.join(self.path, snap)}"
            + f" | pv -brtfL {rate_limit} | "
            + f"ssh -p {ssh_port} {receiver} 'sudo btrfs receive {receiver_path} '"
        ]
        # TODO: breakup the pipe stuff and do it without shell=True, currently it has problems with pipes :(
        self.check_call(args, shell=True)

    def send_withparent(
        self, parent_snap, snap, receiver, receiver_path, ssh_port, rate_limit
    ):
        """Send an incremental snapshot (relative to 'parent_snap') remotely."""
        self.trace(
            LOG_REMOTE
            + f"send snapshot from {snap} with parent {parent_snap} to host {receiver} path={receiver_path}"
        )
        args = [
            f"sudo btrfs send -v -p {os.path.join(self.path, parent_snap)} {os.path.join(self.path, snap)}"
            + f" | pv -brtfL {rate_limit} | "
            + f"ssh -p {ssh_port} {receiver} 'sudo btrfs receive -v {receiver_path} '"
        ]
        self.check_call(args, shell=True)
        self.trace(LOG_REMOTE + "finished sending snapshot")

    def link_current(self, receiver, receiver_path, snap, link_target, ssh_port):
        """Point the remote 'current' symlink at the freshly sent snapshot."""
        self.trace(
            LOG_REMOTE
            + f"linking current snapshot host={receiver} path={receiver_path} snap={snap} link={link_target}"
        )
        args = [
            "ssh",
            "-p",
            ssh_port,
            receiver,
            f"sudo ln -sfn '{os.path.join(receiver_path, snap)}' {link_target}",
        ]
        self.check_call(args)

    def remote_unsnap(self, receiver, receiver_path, dir, ssh_port):
        """Delete one snapshot on the remote host."""
        self.trace(
            LOG_REMOTE
            + f"delete snapshot {dir} from host={receiver} path={receiver_path}"
        )
        args = [
            "ssh",
            "-p",
            ssh_port,
            receiver,
            f"sudo btrfs subvolume delete '{os.path.join(receiver_path, dir)}'",
        ]
        self.check_call(args)
        self.trace(LOG_REMOTE + "deleted")

    def sync_single(self, snap, target):
        """Send one full snapshot to a local target path."""
        self.trace(LOG_LOCAL + "sync single snapshot %s to %s", snap, target)
        args = [
            f"sudo btrfs send -v {os.path.join(self.path, snap)}"
            + " | pv -brtf | "
            + f"sudo btrfs receive -v {target}"
        ]
        self.check_call(args, shell=True)

    def sync_withparent(self, parent_snap, snap, target_path):
        """Send an incremental snapshot to a local target path."""
        self.trace(
            LOG_LOCAL
            + f"send snapshot from {snap} with parent {parent_snap} to path={target_path}"
        )
        args = [
            f"sudo btrfs send -v -p {os.path.join(self.path, parent_snap)} {os.path.join(self.path, snap)}"
            " | pv -brtf | "
            f"sudo btrfs receive -v {target_path}"
        ]
        self.check_call(args, shell=True)


# Allows to Simulate operations
class DryOperations(Operations):
    """Dry-run variant: only 'dry_safe' commands are actually executed."""

    def __init__(self, path, trace=None):
        Operations.__init__(self, path=path, trace=trace)
        # cached directory listing so simulated deletions are visible
        self.dirs = None

    def check_call(self, args, shell=False, dry_safe=False):
        cmd_str = " ".join(args)
        if dry_safe:
            # read-only command (e.g. remote ls): safe to run even in dry mode
            self.trace(LOG_EXEC + "executing dry-safe command: " + cmd_str)
            return Operations.check_call(self, args, shell, dry_safe)
        else:
            # mutating command: only trace what would have been executed
            self.trace(LOG_EXEC + cmd_str)

    # added to simulate also the deletion of snapshots
    def listdir(self):
        if self.dirs is None:
            self.dirs = [d for d in os.listdir(self.path) if timef(d)]
        return self.dirs

    def unsnap(self, dir):
        Operations.unsnap(self, dir)
        self.dirs.remove(dir)
class FakeOperations(DryOperations):
    """In-memory fake for --test mode: snapshots are dict entries, space is an int."""

    def __init__(self, path, trace=None, dirs=None, space=None, snap_space=None):
        Operations.__init__(self, path=path, trace=trace)
        # normalize the optional fixtures to their defaults
        self.dirs = {} if dirs is None else dirs
        self.space = 0 if space is None else space
        self.snap_space = 1 if snap_space is None else snap_space

    def snap(self, path):
        # a fake snapshot occupies snap_space units
        self.dirs[self.datestamp()] = self.snap_space
        Operations.snap(self, path)

    def unsnap(self, dir):
        # deleting a fake snapshot releases its recorded space
        freed = self.dirs[dir]
        self.space += freed
        Operations.unsnap(self, dir)
        del self.dirs[dir]

    def listdir(self):
        self.trace(f"listdir() = {self.dirs.keys()}")
        return self.dirs.keys()

    def listdir_path(self, target_path):
        fixture = ["20101201-030000", "20101201-040000", "20101201-050000"]
        self.trace(f"listdir_path() values={fixture}")
        return fixture

    def listremote_dir(self, receiver, receiver_path, ssh_port):
        fixture = [
            "20101201-030000",
            "20101201-040000",
            "20101201-050000",
            "20101201-070000",
        ]
        self.trace(f"listremotedir() r={receiver}, rp={receiver_path}, values={fixture}")
        return fixture

    def freespace(self):
        self.trace(f"freespace() = {self.space}")
        return self.space


def cleandir(operations, targets):
    """ Perform actual cleanup of using 'operations' until 'targets' are met """

    trace = operations.trace
    keep_backups = targets.keep_backups
    keep_latest = targets.keep_latest
    target_fsp = targets.target_freespace
    target_backups = targets.target_backups
    max_age = targets.max_age
    # tri-state flags: None = not yet evaluated, True = currently above
    # target, False = target already satisfied (suppresses repeated logging)
    was_above_target_freespace = None
    was_above_target_backups = None
    last_dirs = []

    trace(
        LOG_LOCAL
        + f"Parameters for cleandir: keep_backups={keep_backups}, target_freespace={target_fsp}, "
        f"target_backups={target_backups}, max_age={max_age}, keep_latest={keep_latest}"
    )
    next_del = None

    while True:
        do_del = None
        dirs = sorted(operations.listdir())
        dirs_len = len(dirs)
        if dirs_len <= 0:
            raise Exception("No more directories to clean")
        elif dirs == last_dirs:
            # nothing changed since last iteration -> deletion is not working
            raise Exception(f"Could not delete last snapshot: {next_del}")
        else:
            last_dirs = dirs

        # check at least keep this amount of backups
        if keep_backups is not None:
            if dirs_len <= keep_backups:
                trace(
                    LOG_LOCAL
                    + f"current amount of backups: {dirs_len} have to keep a minimum of {keep_backups},"
                    f" stopping further deletion"
                )
                break

        if target_fsp is not None:
            fsp = operations.freespace()
            if fsp >= target_fsp:
                if was_above_target_freespace or was_above_target_freespace is None:
                    trace(
                        LOG_LOCAL
                        + f"Satisfied freespace target={target_fsp}; current free space={fsp}"
                    )
                    was_above_target_freespace = False
                if do_del is None:
                    do_del = False
            else:
                if was_above_target_freespace is None:
                    was_above_target_freespace = True
                do_del = True

        if target_backups is not None:
            if dirs_len <= target_backups:
                if was_above_target_backups or was_above_target_backups is None:
                    trace(
                        LOG_LOCAL
                        + f"Satisfied target number of backups: {target_backups} with {dirs_len}"
                    )
                    was_above_target_backups = False
                if do_del is None:
                    do_del = False
            else:
                if was_above_target_backups is None:
                    was_above_target_backups = True
                do_del = True

        if not do_del:
            break

        # pick the next victim: max-age first, keep-latest overrides,
        # value-scoring as the fallback
        next_del = None
        if max_age is not None:
            next_del = first(sorted_age(dirs, max_age))
        # remove latest first only if the keep_latest is 'True'
        if keep_latest is not None and keep_latest:
            next_del = first(dirs)
        if next_del is None:
            next_del = first(sorted_value(dirs))
        else:
            trace(LOG_LOCAL + "will delete backup: '%s'", operations.datestamp(max_age))
        if next_del is None:
            trace(LOG_LOCAL + "No more backups left")
            break
        else:
            operations.unsnap(next_del)


def transfer(operations, target_host, target_dir, link_dir, ssh_port, rate_limit):
    """ Transfer snapshots to remote host """

    trace = operations.trace

    # find out what kind of snapshots exist on the remote host
    remote_snaps = set(operations.listremote_dir(target_host, target_dir, ssh_port))
    local_snaps = set(operations.listdir())

    if len(local_snaps) == 0:
        # nothing to do here, no snaps here
        return

    parents = remote_snaps.intersection(local_snaps)

    if len(parents) == 0:
        # no common parent: seed the remote side with the oldest local
        # snapshot; everything newer follows incrementally below
        operations.send_single(
            min(local_snaps), target_host, target_dir, ssh_port, rate_limit
        )
        parents.add(min(local_snaps))

    # parent existing, use the latest as parent
    max_parent = max(parents)
    parent = max_parent

    trace(LOG_REMOTE + f"last possible parent = {max_parent}")

    for s in sorted(local_snaps):
        if s > max_parent:
            trace(LOG_REMOTE + f"transfer: parent={parent} snap={s}")
            operations.send_withparent(
                parent, s, target_host, target_dir, ssh_port, rate_limit
            )
            if link_dir is not None:
                operations.link_current(target_host, target_dir, s, link_dir, ssh_port)
            # advance one step
            parent = s
def remotecleandir(operations, target_host, target_dir, remote_keep, ssh_port):
    """ Perform remote cleanup using 'operations' until exactly remote_keep backups are left """
    trace = operations.trace

    if remote_keep is not None:
        dirs = sorted(
            operations.listremote_dir(
                receiver=target_host, receiver_path=target_dir, ssh_port=ssh_port
            )
        )
        dirs_len = len(dirs)
        if dirs_len <= remote_keep or remote_keep <= 0:
            trace(
                LOG_REMOTE
                + "No remote directories to clean, currently %s remote backups, should keep %s",
                dirs_len,
                remote_keep,
            )
        else:
            # delete the least valuable snapshots until remote_keep remain
            delete_dirs = sorted_value(dirs)
            del_count = dirs_len - remote_keep
            trace(
                LOG_REMOTE
                # fixed message typo: was "of out of"
                + f"about to remove {del_count} out of {dirs_len} backups, keeping {remote_keep}"
            )
            for del_dir in itertools.islice(delete_dirs, del_count):
                if del_dir is None:
                    trace(LOG_REMOTE + "No more backups left")
                    break
                else:
                    operations.remote_unsnap(target_host, target_dir, del_dir, ssh_port)


def sync_local(operations, sync_dir):
    """ Transfer snapshots to local target """
    trace = operations.trace

    # find out what kind of snapshots exist on the sync target
    targetsnaps = set(operations.listdir_path(sync_dir))
    localsnaps = set(operations.listdir())

    if len(localsnaps) == 0:
        # nothing to do here, no snaps here
        return

    parents = targetsnaps.intersection(localsnaps)

    # no parent exists so
    if len(parents) == 0:
        # start transferring the oldest snapshot
        # by that snapbtrex will transfer all snapshots that have been created
        operations.sync_single(min(localsnaps), sync_dir)
        parents.add(min(localsnaps))

    # parent existing, use the latest as parent
    max_parent = max(parents)
    parent = max_parent

    trace(LOG_LOCAL + f"Sync: last possible parent = {max_parent}")

    for s in sorted(localsnaps):
        if s > max_parent:
            trace(LOG_LOCAL + f"transfer: parent={parent} snap={s}")
            operations.sync_withparent(parent, s, sync_dir)
            # if link_dir is not None:
            #    operations.link_current(target_host, target_dir, s, link_dir, ssh_port)
            parent = s


def sync_cleandir(operations, target_dir, sync_keep):
    """ Perform local sync cleanup using 'operations' until exactly sync_keep backups are left """
    trace = operations.trace

    if sync_keep is not None:
        dirs = sorted(operations.listdir_path(target_dir))
        dirs_len = len(dirs)
        if dirs_len <= sync_keep or sync_keep <= 0:
            trace(
                LOG_LOCAL
                + f"No synced directories to clean, currently {dirs_len} synced backups, should keep {sync_keep}"
            )
        else:
            delete_dirs = sorted_value(dirs)
            del_count = dirs_len - sync_keep
            trace(
                LOG_LOCAL
                # fixed message typo: was "of out of"
                + f"about to remove sync {del_count} out of {dirs_len} synced backups, keeping {sync_keep}"
            )
            for del_dir in itertools.islice(delete_dirs, del_count):
                if del_dir is None:
                    trace(LOG_LOCAL + "No more synced backups left")
                    break
                else:
                    # log the concrete victim (the old bare "removing: " line
                    # printed before the value was known)
                    trace(LOG_LOCAL + f"removing: {del_dir}")
                    operations.unsnapx(os.path.join(target_dir, del_dir))


def log_trace(fmt, *args, **kwargs):
    """Print a trace line prefixed with a GMT timestamp.

    'fmt' may be a printf-style format consumed by positional or keyword
    arguments; a plain message with no arguments is printed verbatim.
    """
    tt = time.strftime(DATE_FORMAT, time.gmtime(None)) + ": "
    # 'args' is always a tuple and 'kwargs' always a dict, never None, so the
    # previous "is not None" checks forced the % formatting path on every
    # call (raising ValueError for no-arg messages containing a literal '%',
    # e.g. paths) and made the kwargs branch unreachable. Test truthiness.
    if args:
        print(tt + (fmt % args))
    elif kwargs:
        print(tt + (fmt % kwargs))
    else:
        print(tt + fmt)
720 | print(fmt % args) 721 | elif kwargs is not None: 722 | print(fmt % kwargs) 723 | else: 724 | print(fmt) 725 | 726 | 727 | def null_trace(fmt, *args, **kwargs): 728 | pass 729 | 730 | 731 | def main(argv): 732 | import argparse 733 | 734 | class UnitInt(int): 735 | format = "" 736 | mods = {} 737 | 738 | @staticmethod 739 | def parse(cls, target_str): 740 | import re 741 | 742 | form = cls.format % "|".join(x for x in cls.mods.keys() if x is not None) 743 | m = re.match(form, target_str, re.IGNORECASE) 744 | if m: 745 | val, mod = m.groups() 746 | result = cls.eval(int(val), mod) 747 | return result 748 | else: 749 | raise ValueError(f"Invalid value: {target_str}, expected: {form}") 750 | 751 | def __init__(self, value): 752 | super().__init__(value) 753 | self.origin = value 754 | 755 | def __new__(cls, value=0): 756 | if isinstance(value, str): 757 | value = UnitInt.parse(cls, value) 758 | return value 759 | 760 | def __str__(self): 761 | if isinstance(self.origin, int): 762 | return str(self.origin) 763 | else: 764 | return "%s[%s]" % (self.origin, int(self)) 765 | 766 | class Space(UnitInt): 767 | format = "([0-9]+)(%s)?" 768 | mods = {None: 0, "K": 1, "M": 2, "G": 3, "T": 4} 769 | 770 | @staticmethod 771 | def eval(val, mod): 772 | if mod is None: 773 | return val 774 | else: 775 | return val * 1024 ** Space.mods[mod.upper()] 776 | 777 | class Age(UnitInt): 778 | format = "([0-9]+)(%s)?" 
779 | mods = { 780 | None: 1, 781 | "s": 1, 782 | "m": 60, 783 | "h": 60 * 60, 784 | "d": 24 * 60 * 60, 785 | "w": 7 * 24 * 60 * 60, 786 | "y": (52 * 7 + 1) * 24 * 60 * 60, # year = 52 weeks + 1 or 2 days 787 | } 788 | 789 | @staticmethod 790 | def eval(val, mod): 791 | if mod is None: 792 | return max(0, time.time() - val) 793 | else: 794 | return max(0, time.time() - val * Age.mods[mod.lower()]) 795 | 796 | def parse_ageoffset_to_timestamp(age_str): 797 | now = time.time() 798 | age = int(age_str) 799 | if age > now: 800 | raise "Invalid value: %d, expected less than: %d" % (age, now) 801 | else: 802 | return float(now - age) 803 | 804 | parser = argparse.ArgumentParser( 805 | description="Keep btrfs snapshots for backup, optionally sync to snapshots locally or sends snapshots to " 806 | "remote systems via ssh. Visit https://github.com/yoshtec/snapbtrex for more insight." 807 | ) 808 | 809 | parser.add_argument( 810 | "--path", 811 | "-p", 812 | "--snap-to", 813 | metavar="PATH", 814 | required=False, 815 | help="Target path for new snapshots and cleanup operations", 816 | ) 817 | 818 | target_group = parser.add_argument_group( 819 | title="Cleanup", description="Delete backup snapshots until the targets are met" 820 | ) 821 | 822 | target_group.add_argument( 823 | "--target-freespace", 824 | "-F", 825 | dest="target_freespace", 826 | metavar="SIZE", 827 | default=None, 828 | type=Space, 829 | help="Cleanup PATH until at least SIZE is free. SIZE is #bytes, " 830 | + "or given with K, M, G or T respectively for kilo, ...", 831 | ) 832 | 833 | target_group.add_argument( 834 | "--target-backups", 835 | "-B", 836 | dest="target_backups", 837 | metavar="#", 838 | type=int, 839 | help="Cleanup PATH until at most B backups remain", 840 | ) 841 | 842 | target_group.add_argument( 843 | "--keep-backups", 844 | "-K", 845 | metavar="N", 846 | type=int, 847 | default=DEFAULT_KEEP_BACKUPS, 848 | help="Keep minimum of N backups -> This is a lower bound. 
the lower bound is valid for all other options", 849 | ) 850 | 851 | target_group.add_argument( 852 | "--max-age", 853 | "-A", 854 | dest="max_age", 855 | metavar="MAX_AGE", 856 | default=None, 857 | type=Age, 858 | help="Prefer removal of backups older than MAX_AGE seconds. MAX_AGE is #seconds, " 859 | + "or given with m (minutes), h (hours), d (days), w (weeks), y (years = 52w + 1d).", 860 | ) 861 | 862 | target_group.add_argument( 863 | "--keep-only-latest", 864 | "-L", 865 | dest="keep_latest", 866 | action="store_true", 867 | help="lets you keep only the latest snapshots", 868 | ) 869 | 870 | snap_group = parser.add_mutually_exclusive_group(required=False) 871 | 872 | snap_group.add_argument( 873 | "--snap", 874 | "-s", 875 | "--snap-this", 876 | metavar="SUBVOL", 877 | default=".", 878 | help="Take snapshot of SUBVOL on invocation", 879 | ) 880 | 881 | snap_group.add_argument( 882 | "--no-snap", 883 | "-S", 884 | dest="snap", 885 | help="Do not take snapshot", 886 | action="store_const", 887 | const=None, 888 | ) 889 | 890 | parser.add_argument("--test", help="Execute built-in tests", action="store_true") 891 | 892 | parser.add_argument( 893 | "--explain", help="Explain what %(prog)s does (and stop)", action="store_true" 894 | ) 895 | 896 | parser.add_argument( 897 | "--dry-run", 898 | help="Do not execute commands, but print shell commands to stdout that would be executed", 899 | dest="dry_run", 900 | action="store_true", 901 | ) 902 | 903 | parser.add_argument("--verbose", "-v", help="Verbose output", action="store_true") 904 | 905 | transfer_group = parser.add_argument_group( 906 | title="Transfer", 907 | description="Transfer snapshots to other hosts via ssh. " 908 | + "It is assumed that the user running the script is run can connect to the remote host " 909 | + "via keys and without passwords. 
See --explain or visit the homepage for more info", 910 | ) 911 | 912 | transfer_group.add_argument( 913 | "--remote-host", 914 | metavar="HOST", 915 | dest="remote_host", 916 | help="Transfer to target host via ssh.", 917 | ) 918 | 919 | transfer_group.add_argument( 920 | "--remote-dir", 921 | metavar="PATH", 922 | dest="remote_dir", 923 | help="Transfer the snapshot to this PATH on the target host", 924 | ) 925 | 926 | transfer_group.add_argument( 927 | "--remote-link", 928 | metavar="LINK", 929 | dest="remote_link", 930 | help="Create a link the transferred snapshot to this LINK", 931 | ) 932 | 933 | transfer_group.add_argument( 934 | "--remote-keep", 935 | metavar="N", 936 | type=int, 937 | dest="remote_keep", 938 | help="Cleanup remote backups until N backups remain, if unset keep all remote transferred backups", 939 | ) 940 | 941 | transfer_group.add_argument( 942 | "--ssh-port", metavar="SSHPORT", dest="ssh_port", default="22", help="SSH port" 943 | ) 944 | 945 | transfer_group.add_argument( 946 | "--rate-limit", 947 | metavar="RATE", 948 | dest="rate_limit", 949 | default="0", 950 | help="Limit the transfer to a maximum of RATE bytes per " 951 | + 'second. 
A suffix of "k", "m", "g", or "t" can be added ' 952 | + "to denote kilobytes (*1024), megabytes, and so on.", 953 | ) 954 | 955 | sync_group = parser.add_argument_group( 956 | title="Sync Local", 957 | description="Transfer snapshots to another local (btrfs) filesystem.", 958 | ) 959 | 960 | sync_group.add_argument( 961 | "--sync-target", 962 | metavar="PATH", 963 | dest="sync_dir", 964 | help="Copy snapshot to this path", 965 | ) 966 | 967 | sync_group.add_argument( 968 | "--sync-keep", 969 | metavar="N", 970 | type=int, 971 | dest="sync_keep", 972 | help="Cleanup local synced backups until N backups remain, if unset keep all locally synced backups", 973 | ) 974 | 975 | # safety net if no arguments are given call for usage 976 | if len(sys.argv[1:]) == 0: 977 | parser.print_usage() 978 | return 0 979 | 980 | pa = parser.parse_args() 981 | 982 | if pa.verbose: 983 | if sys.stdout.isatty(): 984 | trace = default_trace 985 | else: 986 | # use logging with timestamps on script output 987 | trace = log_trace 988 | else: 989 | trace = null_trace 990 | 991 | if pa.explain: 992 | sys.stdout.write(__doc__) 993 | return 0 994 | 995 | if pa.path is None: 996 | print("Path is missing") 997 | parser.print_help() 998 | return 1 999 | 1000 | # test if pv is installed for needed actions 1001 | if ( 1002 | not (pa.remote_host is None and pa.remote_dir is None) 1003 | or pa.sync_dir is not None 1004 | ): 1005 | import shutil 1006 | 1007 | pv = shutil.which("pv") 1008 | if pv is None: 1009 | print("Error: Missing dependency 'pv' for transfer of snapshots") 1010 | print("install e.g. 
via 'apt install pv'") 1011 | return 1 1012 | 1013 | if pa.test: 1014 | trace("## TEST ##") 1015 | trace( 1016 | "## TEST ## Testing mode: all operations are only displayed without execution" 1017 | ) 1018 | trace("## TEST ##") 1019 | operations = FakeOperations( 1020 | path=pa.path, 1021 | trace=trace, 1022 | dirs={ 1023 | "20101201-000000": 0, 1024 | "20101201-010000": 1, 1025 | "20101201-020000": 2, 1026 | "20101201-030000": 3, 1027 | "20101201-040000": 4, 1028 | "20101201-050000": 5, 1029 | "20101201-060000": 6, 1030 | "20101201-070000": 7, 1031 | "20101201-080000": 8, 1032 | }, 1033 | space=5, 1034 | ) 1035 | elif pa.dry_run: 1036 | trace("## DRY RUN ##") 1037 | trace( 1038 | "## DRY RUN ## Dry Run mode: disk-modifying operations are only displayed without execution" 1039 | ) 1040 | trace("## DRY RUN ##") 1041 | operations = DryOperations(path=pa.path, trace=trace) 1042 | else: 1043 | operations = Operations(path=pa.path, trace=trace) 1044 | 1045 | # -- Actions -- 1046 | # 1. Snapshot 1047 | if pa.snap: 1048 | operations.snap(path=pa.snap) 1049 | 1050 | # 2. remote transfer: host and remote dir are needed 1051 | if not (pa.remote_host is None and pa.remote_dir is None): 1052 | try: 1053 | transfer( 1054 | operations, 1055 | pa.remote_host, 1056 | pa.remote_dir, 1057 | pa.remote_link, 1058 | pa.ssh_port, 1059 | pa.rate_limit, 1060 | ) 1061 | if pa.remote_keep is not None: 1062 | remotecleandir( 1063 | operations, 1064 | pa.remote_host, 1065 | pa.remote_dir, 1066 | pa.remote_keep, 1067 | pa.ssh_port, 1068 | ) 1069 | except RuntimeError as e: 1070 | trace(LOG_REMOTE + f"Error while transferring to remote host: {e}") 1071 | 1072 | # 3. Local sync to another path 1073 | if pa.sync_dir is not None: 1074 | try: 1075 | sync_local(operations, pa.sync_dir) 1076 | if pa.sync_keep is not None: 1077 | sync_cleandir(operations, pa.sync_dir, pa.sync_keep) 1078 | except RuntimeError as e: 1079 | trace(LOG_LOCAL + f"ERROR while Syncing local: {e}") 1080 | 1081 | # 4. 
Cleanup local 1082 | if pa.target_freespace is not None or pa.target_backups is not None: 1083 | try: 1084 | if pa.keep_backups == DEFAULT_KEEP_BACKUPS: 1085 | trace( 1086 | LOG_LOCAL 1087 | + f"using default value for --keep-backups: {DEFAULT_KEEP_BACKUPS}" 1088 | ) 1089 | cleandir(operations=operations, targets=pa) 1090 | except RuntimeError as e: 1091 | trace(LOG_LOCAL + f"ERROR while cleaning up: {e}") 1092 | else: 1093 | trace( 1094 | LOG_LOCAL + "no options for cleaning were passed -> keeping all snapshots" 1095 | ) 1096 | 1097 | 1098 | if "__main__" == __name__: 1099 | sys.exit(main(sys.argv)) 1100 | -------------------------------------------------------------------------------- /testsnap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #set -x #Echo commands for debugging 3 | 4 | LIMG="local.test.img" 5 | LMNT="btrfs.test.local" 6 | SUBVOLUME="./$LMNT/subvolume" 7 | SNAPSHOT="./$LMNT/.snapshot" 8 | 9 | RESULT=0 10 | 11 | header() { 12 | echo -e "\e[1m\e[44mHEADER: $1 \e[0m" 13 | } 14 | 15 | test_error() { 16 | # Increase Error count 17 | RESULT+=1 18 | # see here: https://misc.flogisoft.com/bash/tip_colors_and_formatting 19 | echo -e "\e[1m\e[41mERROR: \e[0m $3, Result: $1, Expected: $2" 20 | } 21 | 22 | test_ok() { 23 | echo -e "\e[1m\e[42mPASSED: \e[0m $3, Result: $1, Expected: $2" 24 | } 25 | 26 | test_equal() { 27 | if [ "$1" -eq "$2" ] 28 | then 29 | test_ok "$1" "$2" "$3" 30 | else 31 | test_error "$1" "$2" "$3" 32 | fi 33 | } 34 | 35 | setup_btrfs() { 36 | truncate -s 140M $LIMG 37 | 38 | mkfs.btrfs $LIMG 39 | 40 | mkdir $LMNT 41 | 42 | mount -o loop $LIMG $LMNT 43 | 44 | btrfs subvolume create "$SUBVOLUME" 45 | mkdir "$SNAPSHOT" 46 | mkdir "$LMNT/.sync" 47 | 48 | touch "$SUBVOLUME/file.file" 49 | head -c 1M "$SUBVOLUME/randomfile.file" 50 | 51 | } 52 | 53 | cleanup_btrfs (){ 54 | umount $LMNT 55 | rmdir $LMNT 56 | rm $LIMG 57 | } 58 | 59 | 60 | test_local_sync(){ 61 | for i in {1..20} 
do
    ./snapbtrex.py --path "$SNAPSHOT" --snap "$SUBVOLUME" --target-backups 10 --verbose --sync-target "./$LMNT/.sync/" --sync-keep 5
    test_equal "$?" 0 "Run: $i"
    sleep 1
  done

  # should be 10 dirs in .snapshot
  X=$(find $SNAPSHOT/* -maxdepth 0 -type d | wc -l)
  test_equal "$X" 10 "Keep Snapshot "

  # and 5 dirs in sync
  Y=$(find ./$LMNT/.sync/* -maxdepth 0 -type d | wc -l)
  test_equal "$Y" 5 "Sync keep"
}

# Verify that --keep-only-latest replaces old snapshots instead of keeping them.
test_local_latest(){
  # take 5 initial snapshots
  for i in {1..5}
  do
    ./snapbtrex.py --path "$SNAPSHOT" --snap "$SUBVOLUME" --target-backups 10 --keep-only-latest --verbose
    test_equal "$?" 0 "Run: $i"
    sleep 1
  done

  FIRST=$(find $SNAPSHOT/* -maxdepth 0 -type d | sort)
  echo "First snapshots:"
  echo "$FIRST"

  # take 10 more snapshots; with --keep-only-latest and --target-backups 10
  # these must push out all of the first 5
  for i in {1..10}
  do
    ./snapbtrex.py --path "$SNAPSHOT" --snap "$SUBVOLUME" --target-backups 10 --keep-only-latest --verbose
    test_equal "$?" 0 "Run: $i"
    sleep 1
  done

  X=$(find $SNAPSHOT/* -maxdepth 0 -type d | wc -l)
  test_equal "$X" 10 "Keep Snapshot "

  # should be 10 dirs in .snapshot
  LAST=$(find $SNAPSHOT/* -maxdepth 0 -type d | sort)
  echo "Last snapshots:"
  echo "$LAST"

  # the two listings must not share a single entry
  count=$(echo "${FIRST[@]}" "${LAST[@]}" | sed 's/ /\n/g' | sort | uniq -d | wc -l)
  test_equal "$count" 0 "keep latest"
}

# Verify that --target-freespace deletes old snapshots while the subvolume grows.
test_local_size(){
  for i in {1..15}
  do
    # BUGFIX: 'head -c N file' only READS N bytes from the file (which the
    # setup never even creates) and prints them to stdout -- the subvolume
    # never grew.  Write N MiB of incompressible random data instead.
    head -c "${i}M" /dev/urandom > "$SUBVOLUME/randomfile.file"
    test_equal "$?" 0 "Run: $i adding bigger file"
    ./snapbtrex.py --path "$SNAPSHOT" --snap "$SUBVOLUME" --verbose --target-freespace 15M --keep-backups 1
    test_equal "$?" 0 "Run: $i Snapshot"
    show_size "./$LMNT/"
    sleep 1
  done
}

# Print free/used space of $1 from three viewpoints: df, btrfs, statvfs.
show_size() {
  df -h "$1"
  btrfs filesystem df "$1"
  python3 -c "import os; x=os.statvfs('$1'); print(x); print('size =' ,x.f_bsize*x.f_blocks, round(x.f_bsize*x.f_blocks/(1024**2),2) ); print('free =', x.f_bsize*x.f_bfree, round(x.f_bsize*x.f_bfree/(1024**2),2) , x.f_bfree/x.f_blocks); print('avail=', x.f_bsize*x.f_bavail, round(x.f_bsize*x.f_bavail/(1024**2),2), x.f_bavail/x.f_blocks);"
}

####
# Main
####

# exit with error if not run as root
if [[ $(id -u) -ne 0 ]] ; then
  test_error "$(id -u)" 0 "running as root"
  echo "testing needs privileged access to btrfs filesystem actions. please run as root"
  exit $RESULT
fi

# in case the last didn't clean all
cleanup_btrfs

header "Test local Sync"
setup_btrfs
test_local_sync
cleanup_btrfs

header "Test latest"
setup_btrfs
test_local_latest
cleanup_btrfs

header "Test Size"
setup_btrfs
test_local_size
cleanup_btrfs

exit $RESULT