├── .coveragerc ├── .github ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── python-black.yml │ └── python-package.yml ├── .gitignore ├── .travis.yml ├── LICENSE.mkd ├── README.mkd ├── docs ├── .gitignore ├── Makefile ├── _static │ └── theme_override.css ├── backup.rst ├── clean.rst ├── conf.py ├── config.rst ├── data_map.rst ├── index.rst ├── make.bat └── quickstart.rst ├── example ├── config.yml └── virt-backup-clean.service ├── pytest.ini ├── setup.cfg ├── setup.py ├── tests ├── conftest.py ├── helper │ ├── __init__.py │ ├── datetime.py │ ├── testdomain.xml │ └── virt_backup.py ├── test_compat_layers_config.py ├── test_compat_layers_definition.py ├── test_compat_layers_pending_info.py ├── test_complete_backup.py ├── test_complete_group.py ├── test_config.py ├── test_domain.py ├── test_group.py ├── test_main.py ├── test_packagers.py ├── test_pending_backup.py ├── test_snapshot.py ├── test_unsupported.py └── testconfig │ ├── config.yml │ └── versions │ ├── 0.4 │ ├── post.yml │ └── pre.yml │ └── full │ ├── 0.1.yml │ └── 0.4.yml ├── tox.ini ├── virt-backup └── virt_backup ├── __init__.py ├── __main__.py ├── backups ├── __init__.py ├── complete.py ├── packagers │ ├── __init__.py │ ├── directory.py │ ├── tar.py │ ├── unsupported.py │ └── zstd.py ├── pending.py └── snapshot.py ├── compat_layers ├── __init__.py ├── config.py ├── definition.py └── pending_info.py ├── config.py ├── domains.py ├── exceptions.py ├── groups ├── __init__.py ├── complete.py ├── pattern.py └── pending.py └── tools.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = virt_backup/tests/* 3 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 10 | -------------------------------------------------------------------------------- /.github/workflows/python-black.yml: 
-------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: Python black 5 | 6 | on: 7 | push: 8 | branches: [ "master" ] 9 | pull_request: 10 | branches: [ "master" ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: ["3.11"] 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip 30 | python -m pip install black 31 | - name: Black 32 | run: | 33 | black --check virt_backup tests 34 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: Python package 5 | 6 | on: 7 | push: 8 | branches: [ "master" ] 9 | pull_request: 10 | branches: [ "master" ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: ["3.9", "3.10", "3.11"] 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip 30 | python -m pip install 
flake8 pytest 31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 32 | sudo apt install python3-libvirt libvirt-dev 33 | python -m pip install tox tox-gh-actions 34 | - name: Tox min 35 | run: | 36 | TOXENV=min tox 37 | 38 | - name: Tox full 39 | run: | 40 | TOXENV=full tox 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #### joe made this: http://goel.io/joe 2 | #### python #### 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | env/ 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *,cover 48 | .hypothesis/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # IPython Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # dotenv 81 | .env 82 | 83 | # virtualenv 84 | venv/ 85 | ENV/ 86 | 87 | # Spyder project settings 88 | .spyderproject 89 | 90 | # Rope project settings 91 | .ropeproject 92 | 93 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | matrix: 4 | include: 5 | - python: 3.11-dev 6 | env: TOXENV=black 7 | - python: 3.11-dev 8 | env: TOXENV=coveralls 9 | - python: 3.9-dev 10 | env: TOXENV=full 11 | - python: 3.9-dev 12 | env: TOXENV=min 13 | - python: 3.10-dev 14 | env: TOXENV=full 15 | - python: 3.10-dev 16 | env: TOXENV=min 17 | - python: 3.11-dev 18 | env: TOXENV=full 19 | - python: 3.11-dev 20 | env: TOXENV=min 21 | 22 | before_install: 23 | - sudo apt-get -qq update 24 | - sudo apt-get install -y libvirt-dev 25 | 26 | install: 27 | - pip install -U --force-reinstall setuptools 28 | - pip install -U tox 29 | 30 | script: 31 | - tox 32 | -------------------------------------------------------------------------------- /LICENSE.mkd: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016, Anthony Ruhier All rights reserved. 
2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 7 | 8 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 9 | 10 | The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. 
11 | -------------------------------------------------------------------------------- /README.mkd: -------------------------------------------------------------------------------- 1 | virt-backup 2 | =========== 3 | 4 | [![Build Status](https://travis-ci.org/aruhier/virt-backup.svg?branch=master)](https://travis-ci.org/aruhier/virt-backup) [![Coverage Status](https://coveralls.io/repos/github/aruhier/virt-backup/badge.svg?branch=master)](https://coveralls.io/github/aruhier/virt-backup?branch=master) 5 | 6 | Do external backup of your KVM guests, managed by libvirt, using the 7 | BlockCommit feature. The main goal is to do a modest alternative to the Proxmox 8 | VE backup system (without their vma system) to automatically backup your disks 9 | (with optional compression) and easily restore ones. Guests are configured by 10 | groups, and can be matched via regex. 11 | 12 | 13 | Documentation 14 | ------------- 15 | 16 | Documentation is available [here](https://virt-backup.readthedocs.io/). 17 | 18 | 19 | Installation 20 | ------------ 21 | 22 | Run: 23 | 24 | ``` 25 | pip3 install virt-backup 26 | ``` 27 | 28 | If you are running on ArchLinux, virt-backup is available through the AUR 29 | package `virt-backup`. 30 | virt-backup is tested under Python 3.5 and 3.6, 3.7. Python < 3.5 is not 31 | supported anymore, due to some deprecations in the used libraries. 32 | 33 | virt-backup should have access to every disk image desired to be backup. It 34 | should also be able to run `qemu-img` (normally installed with libvirt), as it 35 | is used to backup inactive domains. 36 | 37 | 38 | Configuration 39 | ------------- 40 | 41 | The configuration file is looked up into the following paths, in this specific 42 | order: `~/.config/virt-backup/config.yml`, `/etc/virt-backup/config.yml`, and, 43 | if you cloned this repository, in the project's root. 44 | 45 | A self-documented example is available in `example/config.yml`. 
46 | 47 | 48 | Usage 49 | ----- 50 | 51 | Run the application by calling `virt-backup`: 52 | 53 | ``` 54 | $ virt-backup -h 55 | usage: virt-backup [-h] [-d] [--version] 56 | {backup,bak,restore,clean,cl,list,ls} ... 57 | 58 | Backup and restore your kvm libvirt domains 59 | 60 | positional arguments: 61 | {backup,bak,restore,clean,cl,list,ls} 62 | backup (bak) backup groups 63 | restore restore backup 64 | clean (cl) clean groups 65 | list (ls) list groups 66 | 67 | optional arguments: 68 | -h, --help show this help message and exit 69 | -d, --debug set the debug level 70 | --version show program's version number and exit 71 | ``` 72 | 73 | ### Backup 74 | 75 | Subcommand allowing to start the backup for all (except those with the 76 | `autostart` option disabled) or only the specified groups. 77 | 78 | ``` 79 | $ virt-backup backup -h 80 | usage: virt-backup backup [-h] [group [group ...]] 81 | 82 | positional arguments: 83 | group domain group to backup 84 | ``` 85 | 86 | For each domain matching a group, the following process is followed: 87 | 1. An external snapshot is created for all disks concerned by the backup, in 88 | order to freeze the images. For the same domain, all snapshots are created 89 | are the same time, so there is no inconsistency between disks. 90 | 2. A temporary file is created in the backup directory, containing all info 91 | to revert the backup if virt-backup crashed during the process (resulting 92 | in a broken backup and external snapshots that have not been removed). 93 | These backups can be cleaned by using the `clean` subcommand. 94 | 3. Images are copied. 95 | 4. BlockCommit is used to merge temporary external snapshots with their base 96 | disk, and to pivot to the original disk. If the domain is inactive, 97 | libvirt cannot achieve this step, so qemu-img is used. 98 | 5. Remove all temporary file. 99 | 100 | 101 | ### List 102 | 103 | List a short summary of multiple or all groups. 
If a domain name is specified, 104 | it will list all its backups, sorted by date. 105 | 106 | ``` 107 | $ virt-backup list -h 108 | usage: virt-backup list [-h] [-D domain_name] [-s] [group [group ...]] 109 | 110 | positional arguments: 111 | group domain group to list 112 | 113 | optional arguments: 114 | -D domain_name, --domain domain_name 115 | show list of backups for specific domain 116 | -a, --all show all domains matching, even without backup 117 | -s, --short short version, do not print details 118 | ``` 119 | 120 | By default, only domains with at least one backup will be listed, but all 121 | domains matching with the group rules can be printed by using the `-a/--all` 122 | option. 123 | 124 | ### Restore 125 | 126 | Restore a backup. If no date is specified, it will restore the last backup 127 | found for the specified group and domain. 128 | 129 | ``` 130 | $ virt-backup restore -h 131 | usage: virt-backup restore [-h] [--date date] group domain target_dir 132 | 133 | positional arguments: 134 | group domain group 135 | domain domain name 136 | target_dir destination path 137 | 138 | optional arguments: 139 | --date date backup date (default: last backup) 140 | ``` 141 | 142 | ### Clean 143 | 144 | Clean complete backups, depending on the retention policy (as defined for each 145 | group in the configuration), and broken backups. 146 | 147 | A systemd service is available in `example/virt-backup-clean.service` to 148 | trigger a cleaning of all broken backups at start. This way, if the hypervisor 149 | crashed during a backup, the service will clean all temporary files and pivot 150 | all disks on their original images (instead of running on a temporary 151 | external snapshot). 
152 | 153 | ``` 154 | $ virt-backup clean -h 155 | usage: virt-backup clean [-h] [-b | -B] [group [group ...]] 156 | 157 | positional arguments: 158 | group domain group to clean 159 | 160 | optional arguments: 161 | -b, --broken-only only clean broken backups 162 | -B, --no-broken do not clean broken backups 163 | ``` 164 | 165 | License 166 | ------- 167 | 168 | Tool under the BSD license. Do not hesitate to report bugs, ask me some 169 | questions or do some pull request if you want to! 170 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aruhier/virt-backup/5416e7db0478cfa9a42515cdcabab2f93fa4dec4/docs/.gitignore -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = virt_backup 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/_static/theme_override.css: -------------------------------------------------------------------------------- 1 | .wy-nav-content { 2 | max-width: 80%; 3 | } 4 | -------------------------------------------------------------------------------- /docs/backup.rst: -------------------------------------------------------------------------------- 1 | .. _backup: 2 | 3 | ====== 4 | Backup 5 | ====== 6 | 7 | This page describes how the backup process works. 8 | 9 | .. contents:: Table of Contents 10 | :depth: 3 11 | 12 | Principle 13 | --------- 14 | 15 | - A complete backup is defined by its definition. A definition is a json file, stored next to the backup, containing 16 | informations such as the domain name, the disk backups, path to the backup, etc. This is the file listed when doing 17 | ``virt-backup list -D domain``. 18 | - A pending backup is defined by its ``pending_info``. The ``pending_info`` is a definition with some additional 19 | attributes computed when running the backup. It is stored next to the backup, and removed when the backup is 20 | complete. It is used to rebuild a temp backup if a crash happened, and clean everything. 21 | 22 | 23 | How it works 24 | ------------ 25 | 26 | When backuping multiple groups, first virt-backup will build all the groups with the given rules, then merge it into 27 | one. It allows to have a unique entry point to start everything, and deduplicate the similar backups. Read the 28 | :ref:`groups unicity section ` for more details. 29 | 30 | If multithreading is disabled, it then starts the backups one by one. However, if multithreading is enabled, a safety 31 | mechanism is followed if multiple backups target the same domain. Read the :ref:`groups multithreading section 32 | ` for more details. 33 | 34 | Then, each backups are started. 
For each backup, the first step is to create the backup directory, and take an external 35 | snapshot of all targeted disks (read the :ref:`domain external snapshot section ` for more 36 | details). This method is used in order to freeze the disks by the time virt-backup backup them, and then merge them 37 | back with the main disks and pivot the domains like before. There is however multiple inconvenient for that: if the 38 | VM is doing a lot of "remove" (freeing blocks), it's more operations as the external snapshot needs to log it. And it 39 | obviously requires temporarily more space. 40 | 41 | Then the pending info are dumped on disk, where the backup should be. This step allows to be able to clean 42 | the backup if virt-backup would happen to crash (by using ``virt-backup clean``). It contains the snapshot names and 43 | different informations that are known only when starting the backups. 44 | 45 | Now that the disks are frozen, they can be safely copied somewhere. This somewhere is defined by the packager (see the 46 | ``virt_backup.backups.packagers`` package). A packager is a way to store a backup, and expose a standard API so the 47 | backup does not have to care about it. Each disks are copied sequentially into the packager. 48 | 49 | The definition is dumped again, with all the final info. The pending info are removed, the external snapshots are 50 | cleaned (meaning for each snapshot, a blockcommit is triggered, the external snapshot is removed, the disk is pivot). 51 | 52 | If anything goes wrong during the backup, the external snapshot is cleaned, the pending info are removed such as 53 | everything created for the backup (only the backup directory is left). 54 | 55 | 56 | Groups 57 | ------ 58 | 59 | .. 
_backup_groups_unicity: 60 | 61 | Unicity 62 | ~~~~~~~ 63 | 64 | If multiple groups are backup and some share the same domains to backup, virt-backup will try to see if the backups 65 | could be compatible to avoid doing the exact same backup multiple times. 66 | 67 | Example of a groups configuration:: 68 | 69 | groups: 70 | group1: 71 | target: /mnt/kvm/backups 72 | 73 | packager: zstd 74 | packager_opts: 75 | compression_lvl: 6 76 | 77 | ## Hosts definition ## 78 | hosts: 79 | - "test1" 80 | 81 | group2: 82 | target: /mnt/kvm/backups 83 | 84 | packager: zstd 85 | packager_opts: 86 | compression_lvl: 6 87 | 88 | ## Hosts definition ## 89 | hosts: 90 | - "r:test.*" 91 | 92 | group3: 93 | target: /mnt/kvm/backups_disk1_only 94 | 95 | packager: tar 96 | 97 | ## Hosts definition ## 98 | hosts: 99 | - name: "test1" 100 | disks: 101 | - disk1 102 | 103 | Here `group1` and `group2` will try to backup the domain `test1` with all its disks, with the same compression 104 | parameters and to the same target directory. Therefore, `test1` can only be backup once. 105 | 106 | However, `group3` specifies that only the disk `disk1` of `test1` has to be backup, and put it in a tarfile in a 107 | different target directory. It is not considered as compatible with what `group1` and `group2` specify, therefore it 108 | will be backup a second time. 109 | 110 | Running a backup with this configuration will do 2 backups for `test1`: one shared between `group1` and `group2`, one 111 | for `group3`. 112 | 113 | .. _backup_groups_multithreading: 114 | 115 | Multithreading 116 | ~~~~~~~~~~~~~~ 117 | 118 | Backuping a group can be done in single thread or multithread. As a group can contain the same domain with different 119 | options, some safety have been done to avoid backuping the same domain in parallel. It is needed as the process relies 120 | on external snapshot, doing so would take an external snapshot of a snapshot (with the current implementation). 
121 | 122 | As it is considered to be a rare case, all backups targeting the same domain are scheduled in a queue. If other domains 123 | are to backup, the backups in these queues are normally handled in parallel of other backups. 124 | 125 | .. _backup_dom_ext_snap: 126 | 127 | Domain external snapshot 128 | ------------------------ 129 | 130 | A custom helper is implemented to handle the external snapshots (see the ``virt_backup.backups.snapshot`` package). It 131 | uses libvirt to create it, then allows to remove it and pivot back to the main disk by using blockcommit (read `this 132 | libvirt example `_ for more details). 133 | 134 | Quiesce is an option when creating the snapshot. It allows to communicate with the virt agent present on the domain to 135 | force a sync of the disk before taking the snapshot. 136 | If Quiesce is wanted, when doing the snapshot, it first tries to do it with this option. If it fails, because for 137 | example there is no virt-agent running on this domain, it fallbacks on a snapshot without Quiesce (but logs an error). 138 | 139 | Pivoting back to the main disk depends if the domain is up or not. Libvirt does not allow a blockcommit on a shutdown 140 | domain. In this case, ``qemu-img`` is used directly to manually handle the blockcommit. Otherwise, libvirt API is used. 141 | 142 | To blockcommit, libvirt uses an event mechanism. Libvirt takes a function that it will call if there is an issue with 143 | the blockcommit, or if it's done. To centralize it, a custom helper ``DomExtSnapshotCallbackRegistrer`` is used (see 144 | the ``virt_backup.backups.snapshot`` package). It stores the callback to call per snapshot path, so when libvirt calls 145 | the register as a callback, it then look for the known snapshots and call the function to trigger a pivot. This 146 | function is handled by the ``DomExtSnapshot``, which aborts the blockjob and removes the snapshot. 147 | 148 | 149 | .. 
_backup_packagers: 150 | 151 | Packagers 152 | --------- 153 | 154 | Packagers in virt-backup are a common way to deal with storage. They are defined in the 155 | ``virt_backup.backup.packagers`` package. A packager can provide an abstracted way to deal with a folder, archive or 156 | else. 157 | 158 | Each packager is split in 2: 159 | 160 | - Read packager, inherited from ``virt_backup.backup.packagers._AbstractReadBackupPackager``. Provides mechanisms to 161 | list backups from a packager and restore a specific backup (by copying it to a given path). 162 | - Write packager, inherited from ``virt_backup.backup.packagers._AbstractWriteBackupPackager``. Provide mechanisms to 163 | add a new backup in a packager, delete the package and, when possible, remove a specific image from a backup. When 164 | the package is shareable between backups (for example, with a folder storing all the images of a domain), it also 165 | provide a way to remove a specific backup from the package. 166 | 167 | Splitting in read/write allows more safety when dealing with backups: the write packager is used only when the backup 168 | mechanism absolutely needs it, otherwise the read packager is used. 169 | 170 | Available packagers are: 171 | 172 | - ``directory``: store the images directly in a directory. Can be a directory per backup, or a directory shared for 173 | multiple backups. 174 | - ``tar``: store the backups in a tar archive. Can handle compression. 175 | - ``zstd``: store the backups in a zstd archive. Compression level is customizable. Can also handle multithreading for 176 | the compression itself. 177 | -------------------------------------------------------------------------------- /docs/clean.rst: -------------------------------------------------------------------------------- 1 | .. _clean: 2 | 3 | ================ 4 | Backups cleaning 5 | ================ 6 | 7 | This page describes how the process of cleaning the backups works. 8 | 9 | .. 
contents:: Table of Contents 10 | :depth: 3 11 | 12 | 13 | Remove a specific backup 14 | ------------------------ 15 | 16 | Not implemented yet in virt-backup. Removing a backup needs to be done manually, by removing the files. 17 | 18 | 19 | Clean outdated backups 20 | ---------------------- 21 | 22 | To be documented. 23 | 24 | For more details about the retention period, for now please read this comment in a github issue: https://github.com/aruhier/virt-backup/issues/38#issuecomment-659590425 25 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # 4 | import os 5 | import subprocess 6 | import sys 7 | import time 8 | 9 | 10 | # Fetch general information about the project. 11 | # Source: https://github.com/jaraco/skeleton/blob/skeleton/docs/conf.py 12 | root = os.path.realpath(os.path.join(os.path.dirname(__file__), "..")) 13 | setup_script = os.path.join(root, "setup.py") 14 | fields = ["--name", "--version", "--url", "--author"] 15 | dist_info_cmd = [sys.executable, setup_script] + fields 16 | output_bytes = subprocess.check_output(dist_info_cmd, cwd=root) 17 | project_id, version, url, author = output_bytes.decode("utf-8").strip().split("\n") 18 | 19 | # Title-case each word of the project ID. 20 | project = " ".join([word.title() for word in project_id.split("-")]) 21 | htmlhelp_basename = project_id 22 | 23 | release = version 24 | 25 | # Addons. 26 | extensions = [ 27 | "sphinx.ext.autodoc", 28 | "sphinx.ext.extlinks", 29 | "sphinx.ext.todo", 30 | "sphinx.ext.viewcode", 31 | ] 32 | 33 | master_doc = "index" 34 | 35 | # We use our own copyright template instead of the default as the latter strip 36 | # HTML content. 
37 | html_show_copyright = False 38 | copyright = ( 39 | "2013-{}, {} and contributors" 41 | ).format(time.strftime("%Y"), author, url) 42 | 43 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 44 | 45 | nitpicky = True 46 | 47 | # We need a recent sphinx because of the last update format. 48 | needs_sphinx = "1.4" 49 | templates_path = ["templates"] 50 | 51 | # Keep the same ordering as in original source code. 52 | autodoc_member_order = "bysource" 53 | 54 | extlinks = { 55 | "issue": ("{}/issues/%s".format(url), "#"), 56 | "pull": ("{}/pull/%s".format(url), "PR #"), 57 | } 58 | 59 | # If true, `todo` and `todoList` produce output, else they produce nothing. 60 | todo_include_todos = True 61 | 62 | # Use RTD theme both locally and online. Source: https://github.com/snide 63 | # /sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs 64 | on_rtd = os.environ.get("READTHEDOCS", None) == "True" 65 | if not on_rtd: 66 | import sphinx_rtd_theme 67 | 68 | html_theme = "sphinx_rtd_theme" 69 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 70 | 71 | html_static_path = ["_static"] 72 | 73 | 74 | def setup(app): 75 | app.add_css_file("theme_override.css") 76 | -------------------------------------------------------------------------------- /docs/config.rst: -------------------------------------------------------------------------------- 1 | .. _config: 2 | 3 | ============= 4 | Configuration 5 | ============= 6 | 7 | This page describes how to configure virt-backup and goes in detail for each section. 8 | 9 | .. contents:: Table of Contents 10 | :depth: 3 11 | 12 | .. _configuration_full_example: 13 | 14 | Full example 15 | ------------ 16 | 17 | The configuration is a yaml file virtually split into 3 main sections: the global 18 | options, libvirt connection and backup groups. 
Here is a full example:: 19 | 20 | --- 21 | 22 | ######################## 23 | #### Global options #### 24 | ######################## 25 | 26 | ## Be more verbose ## 27 | debug: False 28 | 29 | ## How many threads (simultaneous backups) to run. Use 0 to use all CPU threads 30 | ## detected, 1 to disable multitheading for backups, or the number of threads 31 | ## wanted. Default: 1 32 | threads: 1 33 | 34 | 35 | ############################ 36 | #### Libvirt connection #### 37 | ############################ 38 | 39 | ## Libvirt URI ## 40 | uri: "qemu:///system" 41 | 42 | ## Libvirt authentication, if needed ## 43 | username: 44 | passphrase: 45 | 46 | 47 | ####################### 48 | #### Backup groups #### 49 | ####################### 50 | 51 | ## Groups are here to share the same backup options between multiple domains. 52 | ## That way, it is possible, for example, to have a different policy retention 53 | ## for a pool of guests in testing than for the one in production. 54 | 55 | ## Define default options for all groups. ## 56 | default: 57 | hourly: 1 58 | daily: 4 59 | weekly: 2 60 | monthly: 5 61 | yearly: 1 62 | 63 | ## Groups definition ## 64 | groups: 65 | ## Group name ## 66 | test: 67 | ## Backup directory ## 68 | target: /mnt/kvm/backups 69 | 70 | ## Packager to use for each backup: 71 | ## directory: images will be copied as they are, in a directory per domain 72 | ## tar: images will be packaged in a tar file 73 | ## zstd: images will be compressed with zstd. Requires python "zstandard" package to be installed. 74 | packager: tar 75 | 76 | ## Options for the choosen packager: 77 | ## tar: 78 | ## # Compression algorithm to use. Default to None. 79 | ## compression: None | "xz" | "gz" | "bz2" 80 | ## # Compression level to use for each backup. 
81 | ## # Generally this should be an integer between 1~9 (depends on the 82 | ## # compression algorithm), where 1 will be the fastest while having 83 | ## # the lowest compression ratio, and 9 gives the best compression ratio 84 | ## # but takes the longest time to compress. 85 | ## compression_lvl: [1-9] 86 | ## 87 | ## zstd: 88 | ## # Compression level to use for each backup. 89 | ## # 1 will be the fastest while having the lowest compression ratio, 90 | ## # and 22 gives the best compression ratio but takes the longest time 91 | ## # to compress. 92 | ## compression_lvl: [1-22] 93 | packager_opts: 94 | compression: xz 95 | compression_lvl: 6 96 | 97 | ## When doing `virt-backup backup` without specifying any group, only 98 | ## groups with the autostart option enabled will be backup. 99 | autostart: True 100 | 101 | ## Retention policy: the first backup of the day is considered as the 102 | ## "daily" backup, first of the week "weekly", etc. The following options 103 | ## detail how many backups of each type has to be kept. Set to "*" or None for an 104 | ## infinite retention. 105 | ## Default to 5 for everything, meaning that calling "virt-backup clean" will let 5 106 | ## backups for each period not specified in the config. 107 | hourly: 5 108 | daily: 5 109 | weekly: 5 110 | monthly: 5 111 | yearly: 1 112 | 113 | ## Enable the Libvirt Quiesce option when taking the external snapshots. 114 | ## 115 | ## From Libvirt documentation: libvirt will try to freeze and unfreeze the guest 116 | ## virtual machine’s mounted file system(s), using the guest agent. However, if the 117 | ## guest virtual machine does not have a guest agent, snapshot creation will fail. 118 | ## 119 | ## However, virt-backup has a fallback mechanism if the snapshot happens to fail 120 | ## with Quiesce enabled, and retries without it. 
121 | quiesce: True 122 | 123 | ## Hosts definition ## 124 | hosts: 125 | ## This policy will match the domain "domainname" in libvirt, and will 126 | ## backup the disks "vba" and "vdb" only. 127 | - host: domainname 128 | disks: 129 | - vda 130 | - vdb 131 | ## Quiesce option can also be overriden per host definition. 132 | quiesce: False 133 | ## Will backup all disks of "domainname2" ## 134 | - domainname2 135 | ## Regex that will match for all domains starting with "prod". The regex 136 | ## syntax is the same as the python one 137 | - "r:^prod.*" 138 | ## Exclude the domain domainname3 (useful with regex, for example) 139 | - "!domainname3" 140 | ## Exclude all domains starting with "test" 141 | - "!r:^test.*" 142 | 143 | # vim: set ts=2 sw=2: 144 | 145 | 146 | It can be saved as (the order defines the priority of the import): 147 | 148 | - ``~/.config/virt-backup/config.yml`` 149 | - ``/etc/virt-backup/config.yml`` 150 | 151 | 152 | Global options 153 | -------------- 154 | 155 | They define the global behavior of virt-backup: 156 | 157 | - ``debug``: if ``True``, virt-backup is more verbose. Enable this option (or use the 158 | global `-d` command line option) for bug reports. (Optional, default: ``False``) 159 | - ``threads``: how many simultaneous backups to run. Set it to the number of threads 160 | wanted, or 1 to disable multithreading, or 0 to use all CPU threads detected. 161 | (Optional, default: ``1``) 162 | 163 | 164 | Libvirt connection 165 | ------------------ 166 | 167 | They define the options to connect to libvirt: 168 | 169 | - ``uri``: libvirt URI: https://libvirt.org/uri.html 170 | - ``username``: connection username. (Optional) 171 | - ``password``: connection password. (Optional) 172 | 173 | virt-backup can technically connect to a distant Libvirt, but in order to actually 174 | backup the domain disks, it has to have access to the files. Therefore, it should run on 175 | the same hypervisor than Libvirt. 
176 | 177 | 178 | Backup groups 179 | ------------- 180 | 181 | Domain groups allow sharing the same backup options between multiple domains. 182 | This way, it is possible to define for example a different retention set or compression 183 | for a pool of domains in production than one in testing. 184 | 185 | - ``default``: dictionary containing all the default options for the groups. If a 186 | group redefines an option, it overrides it. 187 | - ``groups``: dictionary defining the groups. Groups are defined per name, and are 188 | themselves dictionaries defining their options. 189 | 190 | Group options 191 | ~~~~~~~~~~~~~ 192 | 193 | - ``target``: backup directory. 194 | - ``packager``: which packager to use. Read the :ref:`Packagers section ` for more info. 195 | - ``packager_opts`` 196 | - ``autostart``: if ``True``, this group will be automatically backed up when doing 197 | ``virt-backup backup`` without the need of specifying it. Otherwise, if set to 198 | ``False``, it needs to be specifically called (``virt-backup backup foo bar``). 199 | - ``hourly``, ``daily``, ``weekly``, ``monthly``, ``yearly``: retention policy. Read 200 | the :ref:`Retention section ` for more info. 201 | - ``quiesce``: Enable the Libvirt Quiesce option when taking the external snapshots. 202 | 203 | From Libvirt documentation: libvirt will try to freeze and unfreeze the guest virtual 204 | machine’s mounted file system(s), using the guest agent. However, if the guest virtual 205 | machine does not have a guest agent, snapshot creation will fail. 206 | 207 | However, virt-backup has a fallback mechanism if the snapshot happens to fail with 208 | Quiesce enabled, and retries without it. 209 | - ``hosts``: domains to include in this group. Read the :ref:`Hosts section ` for more info. 210 | 211 | 212 | .. _configuration_packagers: 213 | 214 | Packagers 215 | ^^^^^^^^^ 216 | 217 | Packagers define the storage mechanism.
The existing packagers are: 218 | 219 | - ``directory``: images will be copied as they are, in a directory per domain 220 | - ``tar``: images will be packed into a tar file 221 | - ``zstd``: images will be compressed with zstd. Requires python ``zstandard`` library 222 | to be installed. 223 | 224 | Then, depending on the packager, some options can be set. 225 | 226 | Tar options: 227 | - ``compression``: set the compression algorithm for the tar archive. (Valid options: 228 | ``None`` | ``xz`` | ``gz`` | ``bz2``, default: ``None``) 229 | - ``compression_lvl``: set the compression level for the given algorithm. Generally 230 | this should be an integer between 1 and 9 (depends on the compression algorithm), where 231 | 1 will be the fastest while having the lowest compression ratio, and 9 gives the 232 | best compression ratio but takes the longest time to compress. 233 | 234 | For more info, read https://docs.python.org/3/library/tarfile.html. 235 | 236 | ZSTD options: 237 | - ``compression_lvl``: set the compression level, between 1 and 22. 1 will be the fastest while having 238 | the lowest compression ratio, and 22 gives the best compression ratio but takes the 239 | longest time to compress. 240 | 241 | 242 | .. _configuration_hosts: 243 | 244 | Hosts 245 | ^^^^^ 246 | 247 | The ``hosts`` option contain a list of domains to match for this group. Each item of this list can also limit the 248 | backup to specific disks, and override different options. 249 | 250 | To only do host matching:: 251 | 252 | hosts: 253 | # Will backup all disks of "domainname2" 254 | - domainname2 255 | # Regex that will match for all domains starting with "prod". 
The regex syntax is the same as the python one 256 | - "r:^prod.*" 257 | # Exclude the domain domainname3 (useful with regex, for example) 258 | - "!domainname3" 259 | # Exclude all domains starting with "test" 260 | - "!r:^test.*" 261 | 262 | To do a more detailed definition, and limit the host to only a list of disks:: 263 | 264 | hosts: 265 | - host: domainname 266 | disks: 267 | - vda 268 | - vdb 269 | ## Quiesce option can also be overriden per host definition. 270 | quiesce: False 271 | # It can still also be a regex. 272 | - host: "r:^prod.*" 273 | disks: 274 | - vda 275 | 276 | As shown in the example, exclusion is possible by adding ``!``. The order of definition does not matter, and exclusion 277 | will always take precedence over the inclusion. 278 | 279 | 280 | .. _configuration_retention: 281 | 282 | Retention 283 | ^^^^^^^^^ 284 | 285 | The available retention options define how many backups to keep per period when cleaning this group. The available time 286 | periods are: 287 | 288 | - ``hourly`` 289 | - ``daily`` 290 | - ``weekly`` 291 | - ``monthly`` 292 | - ``yearly`` 293 | 294 | The default value is ``5`` for everything. 295 | 296 | The first backup of the hour is called an ``hourly`` backup, first of the day is ``daily``, etc. 297 | Setting ``daily`` to ``2`` would mean to keep the first backups of the day of the last 2 days. ``weekly`` to ``2`` 298 | would mean to keep the first backup of the week of the last 2 weeks. 299 | 300 | `The last 2 days/weeks/etc.` is here a simplification in the explanation. Please read the :ref:`backups cleaning 301 | documentation ` to get a full explanation of the cleaning process. 302 | 303 | -------------------------------------------------------------------------------- /docs/data_map.rst: -------------------------------------------------------------------------------- 1 | .. 
_data_map: 2 | 3 | ========= 4 | Data maps 5 | ========= 6 | 7 | This page lists the custom data defined and used by virt-backup, and their schema. 8 | 9 | .. contents:: Table of Contents 10 | :depth: 3 11 | 12 | 13 | Compatibility layers 14 | -------------------- 15 | 16 | In order to ensure that virt-backup can read old backups, old configurations and pending 17 | data, it uses compatibility layers. 18 | 19 | Compatibility layers are defined in ``virt_backup.compatibility_layers``, and each data 20 | has its own package. 21 | 22 | Compatibility layers can use a range of versions if the data allows it. A configuration 23 | doesn't define any version for example, so its compatibility layers will be executed 24 | iteratively. However, Definitions and Pending Info contain a version, therefore only 25 | the compatibility layers between their version and the last one will be run. 26 | 27 | Depending on the data, warnings can be shown to the user to apply the migrations 28 | themselves. Configuration for example will indicate the needed steps to migrate the 29 | configuration file. Things will still run if it is not migrated, but the support of old 30 | configurations can be dropped in the future. 31 | 32 | To ensure that old data can be migrated to the latest wanted state, some tests run all the 33 | compatibility layers (``tests/test_compat_layers_*``). 34 | 35 | .. _data_map_configuration: 36 | 37 | 38 | Configuration 39 | ------------- 40 | 41 | The configuration file is a yaml file used by virt-backup in ``virt_backup.config.Config``:: 42 | 43 | # Be more verbose. 44 | # Default: False 45 | debug: bool 46 | 47 | # How many threads (simultaneous backups) to run. Use 0 to use all CPU threads 48 | # detected, 1 to disable multithreading for backups, or the number of threads wanted. 49 | # Default: 1 50 | threads: int 51 | 52 | 53 | ############################ 54 | #### Libvirt connection #### 55 | ############################ 56 | 57 | # Libvirt URI.
58 | uri: str 59 | 60 | # Libvirt authentication, if needed. 61 | username: str 62 | passphrase: str 63 | 64 | 65 | ####################### 66 | #### Backup groups #### 67 | ####################### 68 | 69 | # Groups are here to share the same backup options between multiple domains. 70 | # That way, it is possible, for example, to have a different policy retention 71 | # for a pool of guests in testing than for the one in production. 72 | 73 | # Define default options for all groups. 74 | default: 75 | target: str 76 | packager: str 77 | packager_opts: dict{packager_option: value} 78 | quiesce: bool 79 | hourly: int 80 | daily: int 81 | weekly: int 82 | monthly: int 83 | yearly: int 84 | 85 | # Groups definition. 86 | groups: 87 | # Group name 88 | str: 89 | # Backup directory. 90 | target: str 91 | 92 | # Packager to use for each backup: 93 | packager: str 94 | 95 | # Options for the choosen packager: 96 | packager_opts: dict{packager_option: value} 97 | 98 | # When doing `virt-backup backup` without specifying any group, only groups with 99 | # the autostart option enabled will be backup. 100 | # Default: False 101 | autostart: bool 102 | 103 | # Retention policy: the first backup of the day is considered as the 104 | # "daily" backup, first of the week "weekly", etc. The following options 105 | # detail how many backups of each type has to be kept. Set to "*" or None for an 106 | # infinite retention. 107 | # Default: 108 | # hourly: 5 109 | # daily: 5 110 | # weekly: 5 111 | # monthly: 5 112 | # yearly: 5 113 | hourly: int 114 | daily: int 115 | weekly: int 116 | monthly: int 117 | yearly: int 118 | 119 | # Enable the Libvirt Quiesce option when taking the external snapshots. 120 | # 121 | # From Libvirt documentation: libvirt will try to freeze and unfreeze the guest 122 | # virtual machine’s mounted file system(s), using the guest agent. However, if the 123 | # guest virtual machine does not have a guest agent, snapshot creation will fail. 
124 | # 125 | # However, virt-backup has a fallback mechanism if the snapshot happens to fail 126 | # with Quiesce enabled, and retries without it. 127 | quiesce: bool 128 | 129 | # Hosts definition. 130 | hosts: 131 | # Can either be a dictionary or a str. 132 | - host: str 133 | disks: []str 134 | quiesce: bool 135 | # If a str, can be the domain name, or a regex. 136 | - str 137 | 138 | 139 | Backup definition 140 | ----------------- 141 | 142 | A backup definition is a JSON file defining a backup. It is stored next to the backup 143 | package to quickly get all the needed information about it, without the need of 144 | unpacking anything:: 145 | 146 | { 147 | name: str, 148 | domain_id: int, 149 | domain_name: str, 150 | // Dump of the libvirt definition of the targeted domain. 151 | domain_xml: str, 152 | disks: { disk_name : backup_disk_name }, 153 | version: str, 154 | date: int, 155 | packager: { 156 | type: str, 157 | opts: {}, 158 | }, 159 | } 160 | 161 | Example:: 162 | 163 | { 164 | "name": "20191001-003401_3_test-domain", 165 | "domain_id": 3, 166 | "domain_name": "test-domain", 167 | "domain_xml": "", 168 | "disks": { 169 | "vda": "20191001-003401_3_test-domain_vda.qcow2", 170 | }, 171 | "version": "0.4.0", 172 | "date": 1569890041, 173 | "packager": { 174 | "type": "tar", 175 | "opts": { 176 | "compression": "gz", 177 | "compression_lvl": 6, 178 | }, 179 | }, 180 | } 181 | 182 | 183 | Pending data 184 | ------------ 185 | 186 | Pending data is a temporary backup definition, following the same structure but with a bit more information in order to 187 | clean everything if something failed:: 188 | 189 | { 190 | name: str, 191 | domain_id: int, 192 | domain_name: str, 193 | // Dump of the libvirt definition of the targeted domain. 
194 | domain_xml: str, 195 | disks: { 196 | disk_name : { 197 | src: str, 198 | snapshot: str, 199 | target: str, 200 | } 201 | }, 202 | version: str, 203 | date: int, 204 | packager: { 205 | type: str, 206 | opts: {}, 207 | }, 208 | } 209 | 210 | Example:: 211 | 212 | { 213 | "name": "20191001-003401_3_test-domain", 214 | "domain_id": 3, 215 | "domain_name": "test-domain", 216 | "domain_xml": "", 217 | "disks": { 218 | "vda": { 219 | "src": "/tmp/test/vda.qcow2", 220 | "snapshot": "/tmp/test/vda.qcow2.snap", 221 | "target": "20191001-003401_3_test-domain_vda.qcow2", 222 | }, 223 | }, 224 | "version": "0.4.0", 225 | "date": 1569890041, 226 | "packager": { 227 | "type": "tar", 228 | "opts": { 229 | "compression": "gz", 230 | "compression_lvl": 6, 231 | }, 232 | }, 233 | } 234 | 235 | The structure is the closest as possible from the backup definition. 236 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to virt-backup's documentation! 2 | ======================================= 3 | 4 | virt-backup does hot external backups of your `Libvirt `_ guests, using the 5 | BlockCommit feature. The goal is to do an automatic backup system, with 6 | optional compression, and be able to easily restore a backup. 7 | 8 | virt-backup is based around groups: a group contains a list of domains to backup, that can be matched by regex. 9 | Each group contains its own configuration, specifying how to store the backups (compression, directory, etc.), 10 | where to store them, the retention by period of time when a cleanup is called, etc. 11 | 12 | 13 | Features 14 | -------- 15 | 16 | * Hot backup one or multiple qemu/raw disk, snapshoting everything at the same time. 17 | * Cold backup a qemu/raw disk. 18 | * Multithreading: can backup multiple domains in parallel. 
19 | * Supports multiple targets for backups: 20 | 21 | * Directory: just copies images in a directory. 22 | * Tar: stores all images of a backup in a tar file (with optional xz/gz/bzip2 compression). 23 | * ZSTD: compresses the images using ZSTD algorithm (supports multithreading). 24 | 25 | * Restore a backup to a folder. 26 | * List all backups, by VM name. 27 | * Clean backup, with configurable time retention (number of backups to keep, 28 | per domain, per hours/day/weeks/months/years) 29 | 30 | 31 | Limitations 32 | ----------- 33 | 34 | * Only supports file type disks (qemu, raw, etc.). Does not support LVM or any block disk. 35 | * Does not handle Libvirt external snapshots. BackingStores are just ignored 36 | and only the current running disk is backup. 37 | * virt-backup has to run on each hypervisor. It has to be able to read the 38 | disks in order to backup them, and it uses the same disk path as configured 39 | in Libvirt. 40 | 41 | 42 | .. toctree:: 43 | :maxdepth: 2 44 | :caption: Contents: 45 | 46 | quickstart 47 | config 48 | backup 49 | data_map 50 | clean 51 | 52 | Indices and tables 53 | ================== 54 | * :ref:`genindex` 55 | * :ref:`modindex` 56 | * :ref:`search` 57 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=vpn_user_mgmt 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. 
Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/quickstart.rst: -------------------------------------------------------------------------------- 1 | .. _quickstart: 2 | 3 | ========== 4 | Quickstart 5 | ========== 6 | 7 | .. currentmodule:: virt_backup 8 | 9 | virt-backup has 4 main functions: 10 | - :ref:`backup ` 11 | - :ref:`list backups ` 12 | - :ref:`restore ` 13 | - :ref:`clean backups ` 14 | 15 | This page describes how to install virt-backup, create a generic configuration then how to use these 4 functions. 16 | 17 | .. contents:: Table of Contents 18 | :depth: 3 19 | 20 | 21 | Installation 22 | ------------ 23 | 24 | Run:: 25 | 26 | pip3 install virt-backup 27 | 28 | Or by using setuptools:: 29 | 30 | python3 ./setup.py install 31 | 32 | virt-backup is Python 3 compatible only. 33 | 34 | 35 | Configuration 36 | ------------- 37 | 38 | .. _quickstart_configuration: 39 | 40 | 41 | virt-backup is based around the definition of groups. Groups can include or exclude as many domains as needed, 42 | and define the backup properties: compression, disks to backup, where to store the backups, retention, etc.. 43 | 44 | Groups definition is the biggest part of the configuration. 45 | 46 | The configuration is a yaml file. Here is a quite generic one:: 47 | 48 | --- 49 | 50 | ######################## 51 | #### Global options #### 52 | ######################## 53 | 54 | ## Be more verbose ## 55 | debug: False 56 | 57 | ## How many threads (simultaneous backups) to run. 
Use 0 to use all CPU threads 58 | ## detected, 1 to disable multitheading for backups, or the number of threads 59 | ## wanted. Default: 1 60 | threads: 1 61 | 62 | 63 | ############################ 64 | #### Libvirt connection #### 65 | ############################ 66 | 67 | ## Libvirt URI ## 68 | uri: "qemu:///system" 69 | 70 | ## Libvirt authentication, if needed ## 71 | username: 72 | passphrase: 73 | 74 | 75 | ####################### 76 | #### Backup groups #### 77 | ####################### 78 | 79 | ## Groups are here to share the same backup options between multiple domains. 80 | ## That way, it is possible, for example, to have a different policy retention 81 | ## for a pool of guests in testing than for the one in production. 82 | 83 | ## Define default options for all groups. ## 84 | ## Here we set the retention parameters for each VM when calling `virt-backup clean`. 85 | default: 86 | hourly: 1 87 | daily: 4 88 | weekly: 2 89 | monthly: 5 90 | yearly: 1 91 | 92 | ## Groups definition ## 93 | groups: 94 | ## Group name ## 95 | test: 96 | ## Backup directory ## 97 | target: /mnt/kvm/backups 98 | 99 | ## Use ZSTD compression, configured at lvl 6 100 | packager: zstd 101 | packager_opts: 102 | compression_lvl: 6 103 | 104 | ## When doing `virt-backup backup` without specifying any group, only 105 | ## groups with the autostart option enabled will be backup. 106 | autostart: True 107 | 108 | ## Enable the Libvirt Quiesce option when taking the external snapshots. 109 | ## 110 | ## From Libvirt documentation: libvirt will try to freeze and unfreeze the guest 111 | ## virtual machine’s mounted file system(s), using the guest agent. However, if the 112 | ## guest virtual machine does not have a guest agent, snapshot creation will fail. 113 | ## 114 | ## However, virt-backup has a fallback mechanism if the snapshot happens to fail 115 | ## with Quiesce enabled, and retries without it. 
116 | quiesce: True 117 | 118 | ## Hosts definition ## 119 | hosts: 120 | ## Will backup everything. 121 | - "r:.*" 122 | 123 | # vim: set ts=2 sw=2: 124 | 125 | 126 | Adapt it and save it either as: 127 | 128 | - ``~/.config/virt-backup/config.yml`` 129 | - ``/etc/virt-backup/config.yml`` 130 | 131 | 132 | Backup 133 | ------ 134 | 135 | .. _quickstart_backup: 136 | 137 | All groups set with the `autostart` option to `True` can be started by running:: 138 | 139 | $ virt-backup backup 140 | 141 | A specific group (``test``) can be started by running:: 142 | 143 | $ virt-backup backup test 144 | 145 | The group has to be defined in the configuration. 146 | 147 | Multiple groups can be ran with:: 148 | 149 | $ virt-backup backup group1 group2 […] 150 | 151 | 152 | List 153 | ---- 154 | 155 | .. _quickstart_list: 156 | 157 | To list the backups for all groups, as a summary:: 158 | 159 | $ virt-backup list 160 | 161 | generic 162 | ========= 163 | 164 | Total backups: 2 hosts, 22 backups 165 | Hosts: 166 | vm-foo-0: 11 backup(s) 167 | vm-bar-0: 11 backup(s) 168 | 169 | test 170 | ====== 171 | 172 | Total backups: 1 hosts, 11 backups 173 | Hosts: 174 | vm-foo-1: 11 backup(s) 175 | 176 | To have a really short summary for all groups:: 177 | 178 | $ virt-backup list -s 179 | 180 | generic 181 | ========= 182 | 183 | Total backups: 9 hosts, 99 backups 184 | 185 | test 186 | ====== 187 | 188 | Total backups: 1 hosts, 11 backups 189 | 190 | By default, only domains with at least one backup will be listed, but all domains matching with the group rules can be 191 | printed by using the ``-a/--all`` option. 
192 | 193 | To list exactly all the backups done for one domain, here ``vm-foo-0``:: 194 | 195 | $ virt-backup list -D vm-foo-0 196 | 197 | generic 198 | ========= 199 | 200 | vm-foo-0: 11 backup(s) 201 | 2020-09-17T01:02:53+00:00: /backups/vm-foo-0/20200917-010253_8_vm-foo-0.json 202 | 2020-09-16T01:02:56+00:00: /backups/vm-foo-0/20200916-010256_8_vm-foo-0.json 203 | 2020-09-15T01:02:39+00:00: /backups/vm-foo-0/20200915-010239_8_vm-foo-0.json 204 | 2020-09-14T01:02:34+00:00: /backups/vm-foo-0/20200914-010234_8_vm-foo-0.json 205 | 2020-09-07T01:03:07+00:00: /backups/vm-foo-0/20200907-010307_8_vm-foo-0.json 206 | 2020-09-01T01:02:22+00:00: /backups/vm-foo-0/20200901-010222_8_vm-foo-0.json 207 | 2020-08-01T01:02:20+00:00: /backups/vm-foo-0/20200801-010220_8_vm-foo-0.json 208 | 2020-07-01T00:55:01+00:00: /backups/vm-foo-0/20200701-005501_3_vm-foo-0.json 209 | 2020-06-01T00:55:02+00:00: /backups/vm-foo-0/20200601-005502_3_vm-foo-0.json 210 | 2020-05-01T00:55:01+00:00: /backups/vm-foo-0/20200501-005501_3_vm-foo-0.json 211 | 2020-04-01T00:55:01+00:00: /backups/vm-foo-0/20200401-005501_3_vm-foo-0.json 212 | 213 | Which lists when the backup was taken, and where its definition file is stored. If the domain matches multiple groups, 214 | backups will be listed per group. 215 | 216 | 217 | 218 | Restore 219 | ------- 220 | 221 | .. _quickstart_restore: 222 | 223 | To restore the last backup of a domain (``vm-foo-0``) part of a given group (``generic``), and extract the result in the given target destination (``~/disks``):: 224 | 225 | $ virt-backup restore generic vm-foo-0 ~/disks 226 | 227 | Which extracts everything backuped to ``~/disks``. 228 | 229 | To extract a specific backup, its date can be specified (``2020-09-17T01:02:53+00:00``):: 230 | 231 | $ virt-backup restore --date 2020-09-17T01:02:53+00:00 generic vm-foo-0 ~/disks 232 | 233 | The format is for the moment non convenient and some work will be needed to facilitate it. 
For the moment, the exact 233 | date and format as given by ``virt-backup list`` has to be used. 234 | 235 | 236 | 237 | Clean 238 | ----- 239 | 240 | .. _quickstart_clean: 241 | 242 | It is possible to automatically clean old backups, by following the configured :ref:`retention policy 243 | `, but also broken backups (for which the backup process was not correctly completed, by a 244 | crash or server shutdown for example). 245 | 246 | To clean old and broken backups for all groups:: 247 | 248 | $ virt-backup clean 249 | 250 | To limit the cleaning to one group only (``test``):: 251 | 252 | $ virt-backup clean test 253 | 254 | 255 | To only clean the broken backups, but not handle the old (correct) backups:: 256 | 257 | $ virt-backup clean -b 258 | 259 | Opposite situation, to not clean the broken backups but only handle the old (correct) backups:: 260 | 261 | $ virt-backup clean -B 262 | 263 | A systemd service is available in `example/virt-backup-clean.service 264 | `_ to trigger a 265 | cleaning of all broken backups at start. This way, if the hypervisor crashed during a backup, the service will clean 266 | all temporary files and pivot all disks to their original images (instead of running on a temporary external snapshot). 267 | -------------------------------------------------------------------------------- /example/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ######################## 4 | #### Global options #### 5 | ######################## 6 | 7 | ## Be more verbose ## 8 | debug: False 9 | 10 | ## How many threads (simultaneous backups) to run. Use 0 to use all CPU threads 11 | ## detected, 1 to disable multithreading for backups, or the number of threads 12 | ## wanted.
Default: 1 13 | threads: 1 14 | 15 | 16 | ############################ 17 | #### Libvirt connection #### 18 | ############################ 19 | 20 | ## Libvirt URI ## 21 | uri: "qemu:///system" 22 | 23 | ## Libvirt authentication, if needed ## 24 | username: 25 | passphrase: 26 | 27 | 28 | ####################### 29 | #### Backup groups #### 30 | ####################### 31 | 32 | ## Groups are here to share the same backup options between multiple domains. 33 | ## That way, it is possible, for example, to have a different policy retention 34 | ## for a pool of guests in testing than for the one in production. 35 | 36 | ## Define default options for all groups. ## 37 | default: 38 | hourly: 1 39 | daily: 4 40 | weekly: 2 41 | monthly: 5 42 | yearly: 1 43 | 44 | ## Groups definition ## 45 | groups: 46 | ## Group name ## 47 | test: 48 | ## Backup directory ## 49 | target: /mnt/kvm/backups 50 | 51 | ## Packager to use for each backup: 52 | ## directory: images will be copied as they are, in a directory per domain 53 | ## tar: images will be packaged in a tar file 54 | ## zstd: images will be compressed with zstd. Requires python "zstandard" package to be installed. 55 | packager: tar 56 | 57 | ## Options for the choosen packager: 58 | ## tar: 59 | ## # Compression algorithm to use. Default to None. 60 | ## compression: None | "xz" | "gz" | "bz2" 61 | ## # Compression level to use for each backup. 62 | ## # Generally this should be an integer between 1~9 (depends on the 63 | ## # compression algorithm), where 1 will be the fastest while having 64 | ## # the lowest compression ratio, and 9 gives the best compression ratio 65 | ## # but takes the longest time to compress. 66 | ## compression_lvl: [1-9] 67 | ## 68 | ## zstd: 69 | ## # Compression level to use for each backup. 70 | ## # 1 will be the fastest while having the lowest compression ratio, 71 | ## # and 22 gives the best compression ratio but takes the longest time 72 | ## # to compress. 
73 | ## compression_lvl: [1-22] 74 | packager_opts: 75 | compression: xz 76 | compression_lvl: 6 77 | 78 | ## When doing `virt-backup backup` without specifying any group, only 79 | ## groups with the autostart option enabled will be backup. 80 | autostart: True 81 | 82 | ## Retention policy: the first backup of the day is considered as the 83 | ## "daily" backup, first of the week "weekly", etc. The following options 84 | ## detail how many backups of each type has to be kept. Set to "*" or None for an 85 | ## infinite retention. 86 | ## Default to 5 for everything, meaning that calling "virt-backup clean" will let 5 87 | ## backups for each period not specified in the config. 88 | hourly: 5 89 | daily: 5 90 | weekly: 5 91 | monthly: 5 92 | yearly: 1 93 | 94 | ## Enable the Libvirt Quiesce option when taking the external snapshots. 95 | ## 96 | ## From Libvirt documentation: libvirt will try to freeze and unfreeze the guest 97 | ## virtual machine’s mounted file system(s), using the guest agent. However, if the 98 | ## guest virtual machine does not have a guest agent, snapshot creation will fail. 99 | ## 100 | ## However, virt-backup has a fallback mechanism if the snapshot happens to fail 101 | ## with Quiesce enabled, and retries without it. 102 | quiesce: True 103 | 104 | ## Hosts definition ## 105 | hosts: 106 | ## This policy will match the domain "domainname" in libvirt, and will 107 | ## backup the disks "vba" and "vdb" only. 108 | - host: domainname 109 | disks: 110 | - vda 111 | - vdb 112 | ## Quiesce option can also be overriden per host definition. 113 | quiesce: False 114 | ## Will backup all disks of "domainname2" ## 115 | - domainname2 116 | ## Regex that will match for all domains starting with "prod". 
The regex 117 | ## syntax is the same as the python one 118 | - "r:^prod.*" 119 | ## Exclude the domain domainname3 (useful with regex, for example) 120 | - "!domainname3" 121 | ## Exclude all domains starting with "test" 122 | - "!r:^test.*" 123 | 124 | # vim: set ts=2 sw=2: 125 | -------------------------------------------------------------------------------- /example/virt-backup-clean.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Clean broken backups done by virt-backup 3 | Wants=libvirtd.service 4 | After=network.target 5 | After=libvirtd.service 6 | 7 | [Service] 8 | ExecStart=/usr/bin/virt-backup clean -b 9 | Type=oneshot 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | extra: tests for optional dependencies/modules 4 | no_extra: tests to run when optional dependencies are not installed 5 | no_zstd: tests to run when zstd modules not installed (included in no_extra) 6 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 0.5.6 3 | commit = True 4 | tag = True 5 | 6 | [aliases] 7 | test = pytest --extras --addopts "-m 'not no_extra'" 8 | testmin = pytest --addopts "-m 'not extra'" 9 | testv = pytest --addopts "-v --durations=10 -m 'not no_extra'" --extras 10 | testd = pytest --addopts "--pdb -m 'not no_extra'" --extras 11 | testlf = pytest --addopts "--lf -m 'not no_extra'" --extras 12 | testcov = pytest --addopts "--cov virt_backup --cov-config .coveragerc -m 'not no_extra'" --extras 13 | 14 | [bumpversion:file:setup.py] 15 | search = version="{current_version}" 16 | replace = version="{new_version}" 17 | 18 | 
[bumpversion:file:virt_backup/__init__.py] 19 | search = VERSION = "{current_version}" 20 | replace = VERSION = "{new_version}" 21 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Automatic backups for libvirt 5 | See: 6 | https://github.com/Anthony25/virt-backup 7 | """ 8 | 9 | from os import path 10 | from setuptools import find_packages, setup 11 | 12 | here = path.abspath(path.dirname(__file__)) 13 | 14 | setup( 15 | name="virt-backup", 16 | version="0.5.6", 17 | description="Automatic backups for libvirt", 18 | url="https://github.com/Anthony25/virt-backup", 19 | author="Anthony25 ", 20 | author_email="anthony.ruhier@gmail.com", 21 | license="Simplified BSD", 22 | classifiers=[ 23 | "Operating System :: POSIX :: Linux", 24 | "Programming Language :: Python :: 3 :: Only", 25 | "License :: OSI Approved :: BSD License", 26 | ], 27 | keywords="libvirt", 28 | packages=find_packages(exclude=["example", "tests"]), 29 | install_requires=[ 30 | "appdirs", 31 | "arrow", 32 | "libvirt-python", 33 | "lxml", 34 | "packaging", 35 | "PyYAML", 36 | ], 37 | setup_requires=[ 38 | "pytest-runner", 39 | ], 40 | tests_require=["pytest", "pytest-cov", "pytest-mock", "deepdiff"], 41 | extras_require={ 42 | "zstd": ["zstandard"], 43 | }, 44 | entry_points={ 45 | "console_scripts": [ 46 | "virt-backup = virt_backup.__main__:cli_run", 47 | ], 48 | }, 49 | ) 50 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from virt_backup.backups import DomBackup, DomExtSnapshotCallbackRegistrer 3 | from virt_backup.groups import BackupGroup 4 | from helper.virt_backup import MockDomain, MockConn, build_completed_backups 5 | 6 | 7 | @pytest.fixture 8 | def 
build_mock_domain(mocker): 9 | return MockDomain(_conn=mocker.stub()) 10 | 11 | 12 | @pytest.fixture 13 | def build_stopped_mock_domain(build_mock_domain): 14 | build_mock_domain.set_state(4, 1) 15 | return build_mock_domain 16 | 17 | 18 | @pytest.fixture 19 | def build_mock_libvirtconn(): 20 | return MockConn() 21 | 22 | 23 | @pytest.fixture 24 | def build_mock_libvirtconn_filled(build_mock_libvirtconn): 25 | conn = build_mock_libvirtconn 26 | domain_names = ("a", "b", "vm-10", "matching", "matching2") 27 | conn._domains = [ 28 | MockDomain(name=dom_name, _conn=conn, id=id) 29 | for id, dom_name in enumerate(domain_names) 30 | ] 31 | return conn 32 | 33 | 34 | @pytest.fixture 35 | def build_backup_directory(tmpdir): 36 | domain_names, backup_dates = build_completed_backups(str(tmpdir)) 37 | return { 38 | "domain_names": domain_names, 39 | "backup_dates": backup_dates, 40 | "backup_dir": tmpdir, 41 | } 42 | 43 | 44 | @pytest.fixture 45 | def get_dombackup(build_mock_domain, build_mock_libvirtconn): 46 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(build_mock_libvirtconn) 47 | return DomBackup(build_mock_domain, callbacks_registrer=callbacks_registrer) 48 | 49 | 50 | @pytest.fixture 51 | def get_uncompressed_dombackup(build_mock_domain, build_mock_libvirtconn): 52 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(build_mock_libvirtconn) 53 | return DomBackup( 54 | dom=build_mock_domain, 55 | dev_disks=("vda",), 56 | packager="directory", 57 | callbacks_registrer=callbacks_registrer, 58 | ) 59 | 60 | 61 | @pytest.fixture 62 | def get_compressed_dombackup(build_mock_domain, build_mock_libvirtconn): 63 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(build_mock_libvirtconn) 64 | return DomBackup( 65 | dom=build_mock_domain, 66 | dev_disks=("vda",), 67 | packager="tar", 68 | packager_opts={"compression": "xz", "compression_lvl": 4}, 69 | callbacks_registrer=callbacks_registrer, 70 | ) 71 | 72 | 73 | @pytest.fixture 74 | def 
get_backup_group(build_mock_domain, build_mock_libvirtconn): 75 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(build_mock_libvirtconn) 76 | return BackupGroup( 77 | build_mock_libvirtconn, 78 | domlst=((build_mock_domain, None),), 79 | callbacks_registrer=callbacks_registrer, 80 | ) 81 | -------------------------------------------------------------------------------- /tests/helper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aruhier/virt-backup/5416e7db0478cfa9a42515cdcabab2f93fa4dec4/tests/helper/__init__.py -------------------------------------------------------------------------------- /tests/helper/datetime.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | 4 | class MockDatetime(datetime.datetime): 5 | fixed_date = None 6 | 7 | @classmethod 8 | def now(cls): 9 | return cls.fixed_date 10 | -------------------------------------------------------------------------------- /tests/helper/testdomain.xml: -------------------------------------------------------------------------------- 1 | 2 | test 3 | 59710e20-e403-45f1-959b-b64b53e2d213 4 | test domain 5 | 6 | hvm 7 | 8 | 9 | /usr/sbin/qemu-system-x86_64 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 |
19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 |
36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /tests/helper/virt_backup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import arrow 4 | import libvirt 5 | import lxml 6 | import lxml.etree 7 | 8 | from virt_backup.backups import ( 9 | DomBackup, 10 | DomExtSnapshotCallbackRegistrer, 11 | ReadBackupPackagers, 12 | WriteBackupPackagers, 13 | ) 14 | from virt_backup.groups import BackupGroup 15 | 16 | 17 | CUR_PATH = os.path.dirname(os.path.realpath(__file__)) 18 | 19 | 20 | class MockDomain: 21 | """ 22 | Simulate a libvirt domain 23 | """ 24 | 25 | def XMLDesc(self): 26 | """ 27 | Return the definition of a testing domain 28 | """ 29 | return lxml.etree.tostring(self.dom_xml, pretty_print=True).decode() 30 | 31 | def ID(self): 32 | return self.dom_xml.get("id") 33 | 34 | def UUID(self): 35 | return self.dom_xml.get("uuid") 36 | 37 | def name(self): 38 | return self.dom_xml.xpath("name")[0].text 39 | 40 | def state(self): 41 | return self._state 42 | 43 | def isActive(self): 44 | state_id = self.state()[0] 45 | return state_id >= 1 and state_id <= 3 46 | 47 | def set_name(self, name): 48 | elem_name = self.dom_xml.xpath("name")[0] 49 | if elem_name is None: 50 | elem_name = self.dom_xml.makeelement("name") 51 | self.dom_xml.insert(0, elem_name) 52 | elem_name.text = name 53 | 54 | def set_id(self, id): 55 | self.dom_xml.set("id", str(id)) 56 | 57 | def set_uuid(self, uuid): 58 | self.dom_xml.set("uuid", uuid) 59 | 60 | def set_state(self, state_id, reason_id): 61 | self._state = [state_id, reason_id] 62 | 63 | def set_storage_basedir(self, basedir): 64 | """ 65 | Change the basedir of all attached disks 66 | 67 | :param basedir: new basedir 68 | """ 69 | for elem in self.dom_xml.xpath("devices/disk"): 70 | try: 71 | if ( 72 | elem.get("device", None) == "disk" 73 | and elem.get("type", None) == "file" 74 | ): 75 | src = 
elem.xpath("source")[0] 76 | img = src.get("file") 77 | new_path = os.path.join(basedir, os.path.basename(img)) 78 | src.set("file", new_path) 79 | except IndexError: 80 | continue 81 | 82 | def snapshotCreateXML(self, xmlDesc, flags=0): 83 | return self._mock_snapshot(xmlDesc, flags) 84 | 85 | def set_mock_snapshot_create(self, mock): 86 | self._mock_snapshot = mock 87 | 88 | def updateDeviceFlags(self, xml, flags): 89 | new_device_xml = lxml.etree.fromstring( 90 | xml, lxml.etree.XMLParser(resolve_entities=False) 91 | ) 92 | 93 | address = new_device_xml.get("address") 94 | device_to_replace = self._find_device_with_address(address) 95 | 96 | self.dom_xml.xpath("devices")[0].replace(device_to_replace, new_device_xml) 97 | 98 | def _find_device_with_address(self, address): 99 | for elem in self.dom_xml.xpath("devices/*"): 100 | try: 101 | if elem.get("address", None) == address: 102 | return elem 103 | except IndexError: 104 | continue 105 | raise Exception("Device not found") 106 | 107 | def __init__(self, _conn, name="test", id=1, *args, **kwargs): 108 | self._conn = _conn 109 | self._state = [1, 1] 110 | self._mock_snapshot = lambda *args: MockSnapshot(name) 111 | 112 | with open(os.path.join(CUR_PATH, "testdomain.xml")) as dom_xmlfile: 113 | self.dom_xml = lxml.etree.fromstring( 114 | dom_xmlfile.read(), lxml.etree.XMLParser(resolve_entities=False) 115 | ) 116 | self.set_id(id) 117 | self.set_name(name) 118 | self.set_uuid(kwargs.get("uuid", str(uuid.uuid4()))) 119 | 120 | 121 | class MockSnapshot: 122 | def getName(self): 123 | return self._name 124 | 125 | def __init__(self, name): 126 | self._name = name 127 | 128 | 129 | class MockConn: 130 | """ 131 | Simulate a libvirt connection 132 | """ 133 | 134 | _libvirt_version = 3000000 135 | 136 | def listAllDomains(self): 137 | return self._domains 138 | 139 | def lookupByName(self, name): 140 | for d in self._domains: 141 | if d.name() == name: 142 | return d 143 | raise libvirt.libvirtError("Domain not 
found") 144 | 145 | def defineXML(self, xml): 146 | md = MockDomain(_conn=self) 147 | md.dom_xml = lxml.etree.fromstring( 148 | xml, lxml.etree.XMLParser(resolve_entities=False) 149 | ) 150 | for _, d in enumerate(self._domains): 151 | if d.ID() == md.ID(): 152 | d.dom_xml = md.dom_xml 153 | return d 154 | 155 | self._domains.append(md) 156 | return md 157 | 158 | def getLibVersion(self): 159 | return self._libvirt_version 160 | 161 | def __init__(self, _domains=None, *args, **kwargs): 162 | self._domains = _domains or [] 163 | 164 | 165 | def build_complete_backup_files_from_domainbackup(dbackup, date): 166 | """ 167 | :returns definition: updated definition from backuped files 168 | """ 169 | definition = dbackup.get_definition() 170 | definition["date"] = date.int_timestamp 171 | definition["disks"] = {} 172 | 173 | backup_dir = dbackup.backup_dir 174 | definition["path"] = backup_dir 175 | 176 | definition["name"] = dbackup._main_backup_name_format( 177 | arrow.get(definition["date"]).to("local") 178 | ) 179 | packager = dbackup._get_write_packager(definition["name"]) 180 | 181 | with packager: 182 | for disk in dbackup.disks: 183 | # create empty files as our backup images 184 | img_name = "{}.qcow2".format( 185 | dbackup._disk_backup_name_format(date, disk), 186 | ) 187 | definition["disks"][disk] = img_name 188 | 189 | img_complete_path = os.path.join(backup_dir, img_name) 190 | with open(img_complete_path, "w"): 191 | pass 192 | if dbackup.packager != "directory": 193 | packager.add(img_complete_path, img_name) 194 | os.remove(img_complete_path) 195 | return definition 196 | 197 | 198 | def build_completed_backups(backup_dir): 199 | domain_names = ("a", "b", "vm-10", "matching", "matching2") 200 | backup_properties = ( 201 | (arrow.get("2016-07-08 19:40:02").to("local"), "directory", {}), 202 | (arrow.get("2016-07-08 18:40:02").to("local"), "directory", {}), 203 | (arrow.get("2016-07-08 18:30:02").to("local"), "directory", {}), 204 | (arrow.get("2016-07-08 
17:40:02").to("local"), "directory", {}), 205 | (arrow.get("2016-07-07 19:40:02").to("local"), "directory", {}), 206 | (arrow.get("2016-07-07 21:40:02").to("local"), "directory", {}), 207 | (arrow.get("2016-07-06 20:40:02").to("local"), "directory", {}), 208 | (arrow.get("2016-04-08 19:40:02").to("local"), "directory", {}), 209 | (arrow.get("2014-05-01 00:30:00").to("local"), "tar", {}), 210 | (arrow.get("2016-03-08 14:28:13").to("local"), "tar", {"compression": "xz"}), 211 | ) 212 | conn = MockConn() 213 | for domain_id, domain_name in enumerate(domain_names): 214 | domain_bdir = os.path.join(backup_dir, domain_name) 215 | os.mkdir(domain_bdir) 216 | domain = MockDomain(conn, name=domain_name, id=domain_id) 217 | dbackup = build_dombackup(domain, domain_bdir, dev_disks=("vda", "vdb")) 218 | 219 | for bakdate, packager, packager_opts in backup_properties: 220 | dbackup.packager = packager 221 | dbackup.packager_opts = packager_opts.copy() 222 | 223 | definition = build_complete_backup_files_from_domainbackup(dbackup, bakdate) 224 | dbackup._dump_json_definition(definition) 225 | # create a bad json file 226 | with open(os.path.join(domain_bdir, "badfile.json"), "w"): 227 | pass 228 | 229 | return (domain_names, (bp[0] for bp in backup_properties)) 230 | 231 | 232 | def build_dombackup(dom, *dombackup_args, **dombackup_kwargs): 233 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(dom._conn) 234 | return DomBackup( 235 | dom, 236 | *dombackup_args, 237 | callbacks_registrer=callbacks_registrer, 238 | **dombackup_kwargs 239 | ) 240 | 241 | 242 | def build_backup_group(conn, *group_args, **group_kwargs): 243 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(conn) 244 | return BackupGroup( 245 | *group_args, callbacks_registrer=callbacks_registrer, **group_kwargs 246 | ) 247 | -------------------------------------------------------------------------------- /tests/test_compat_layers_config.py: 
-------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import os 3 | import yaml 4 | import pytest 5 | 6 | from deepdiff import DeepDiff 7 | from virt_backup import config 8 | from virt_backup.compat_layers.config import convert_warn, ToV0_4 9 | 10 | CUR_PATH = os.path.dirname(os.path.realpath(__file__)) 11 | TESTCONF_PATH = os.path.join(CUR_PATH, "testconfig/versions") 12 | 13 | 14 | class _BaseTestConfigConverter(ABC): 15 | @property 16 | @abstractmethod 17 | def target(self): 18 | pass 19 | 20 | @property 21 | @abstractmethod 22 | def converter(self): 23 | pass 24 | 25 | def get_config(self, config_type: str): 26 | path = os.path.join(TESTCONF_PATH, self.target, "{}.yml".format(config_type)) 27 | return config.get_config(path) 28 | 29 | def test_convert(self): 30 | pre = self.get_config("pre") 31 | post = self.get_config("post") 32 | 33 | self.converter.convert(pre) 34 | diff = DeepDiff(pre, post) 35 | assert not diff, "diff found between converted config and expected config" 36 | 37 | 38 | class TestV0_1ToV0_4(_BaseTestConfigConverter): 39 | target = "0.4" 40 | converter = ToV0_4() 41 | 42 | 43 | def test_convert_warn(): 44 | """ 45 | Test conversion from the minimum version supported to the last version supported. 
46 | """ 47 | pre = config.get_config(os.path.join(TESTCONF_PATH, "full", "0.1.yml")) 48 | post = config.get_config(os.path.join(TESTCONF_PATH, "full", "0.4.yml")) 49 | 50 | convert_warn(pre) 51 | diff = DeepDiff(pre, post) 52 | assert not diff, "diff found between converted config and expected config" 53 | -------------------------------------------------------------------------------- /tests/test_compat_layers_definition.py: -------------------------------------------------------------------------------- 1 | from deepdiff import DeepDiff 2 | from packaging.version import parse as version_parser 3 | import pytest 4 | 5 | from virt_backup.compat_layers.definition import convert, ToV0_4 6 | 7 | 8 | class TestV0_1ToV0_4: 9 | @pytest.mark.parametrize( 10 | "definition,expected", 11 | [ 12 | ( 13 | { 14 | "compression": "gz", 15 | "compression_lvl": 6, 16 | "domain_id": 3, 17 | "domain_name": "test-domain", 18 | "version": "0.1.0", 19 | "date": 1569890041, 20 | "tar": "20191001-003401_3_test-domain.tar.gz", 21 | }, 22 | { 23 | "name": "20191001-003401_3_test-domain", 24 | "domain_id": 3, 25 | "domain_name": "test-domain", 26 | "version": "0.4.0", 27 | "date": 1569890041, 28 | "packager": { 29 | "type": "tar", 30 | "opts": { 31 | "compression": "gz", 32 | "compression_lvl": 6, 33 | }, 34 | }, 35 | }, 36 | ), 37 | ( 38 | { 39 | "domain_id": 3, 40 | "domain_name": "test-domain", 41 | "version": "0.1.0", 42 | "date": 1569890041, 43 | }, 44 | { 45 | "name": "20191001-003401_3_test-domain", 46 | "domain_id": 3, 47 | "domain_name": "test-domain", 48 | "version": "0.4.0", 49 | "date": 1569890041, 50 | "packager": { 51 | "type": "directory", 52 | "opts": {}, 53 | }, 54 | }, 55 | ), 56 | ( 57 | { 58 | "name": "20191001-003401_3_test-domain", 59 | "domain_id": 3, 60 | "domain_name": "test-domain", 61 | "version": "0.4.0", 62 | "date": 1569890041, 63 | "packager": { 64 | "type": "tar", 65 | "opts": {}, 66 | }, 67 | }, 68 | { 69 | "name": "20191001-003401_3_test-domain", 70 | 
"domain_id": 3, 71 | "domain_name": "test-domain", 72 | "version": "0.4.0", 73 | "date": 1569890041, 74 | "packager": { 75 | "type": "tar", 76 | "opts": {}, 77 | }, 78 | }, 79 | ), 80 | ], 81 | ) 82 | def test_convert(self, definition, expected): 83 | c = ToV0_4() 84 | c.convert(definition) 85 | 86 | diff = DeepDiff(definition, expected) 87 | assert not diff, "diff found between converted and expected definition" 88 | 89 | @pytest.mark.parametrize( 90 | "version,expected", 91 | [ 92 | ("0.1.0", True), 93 | ("0.4.0", False), 94 | ("0.5.0", False), 95 | ], 96 | ) 97 | def test_is_needed(self, version, expected): 98 | c = ToV0_4() 99 | assert c.is_needed(version_parser(version)) == expected 100 | 101 | 102 | @pytest.mark.parametrize( 103 | "definition,expected", 104 | [ 105 | ( 106 | { 107 | "compression": "gz", 108 | "compression_lvl": 6, 109 | "domain_id": 3, 110 | "domain_name": "test-domain", 111 | "domain_xml": "", 112 | "disks": { 113 | "vda": "20191001-003401_3_test-domain_vda.qcow2", 114 | }, 115 | "version": "0.1.0", 116 | "date": 1569890041, 117 | "tar": "20191001-003401_3_test-domain.tar.gz", 118 | }, 119 | { 120 | "name": "20191001-003401_3_test-domain", 121 | "domain_id": 3, 122 | "domain_name": "test-domain", 123 | "domain_xml": "", 124 | "disks": { 125 | "vda": "20191001-003401_3_test-domain_vda.qcow2", 126 | }, 127 | "version": "0.4.0", 128 | "date": 1569890041, 129 | "packager": { 130 | "type": "tar", 131 | "opts": { 132 | "compression": "gz", 133 | "compression_lvl": 6, 134 | }, 135 | }, 136 | }, 137 | ), 138 | ( 139 | { 140 | "domain_id": 3, 141 | "domain_name": "test-domain", 142 | "version": "0.1.0", 143 | "date": 1569890041, 144 | }, 145 | { 146 | "name": "20191001-003401_3_test-domain", 147 | "domain_id": 3, 148 | "domain_name": "test-domain", 149 | "version": "0.4.0", 150 | "date": 1569890041, 151 | "packager": { 152 | "type": "directory", 153 | "opts": {}, 154 | }, 155 | }, 156 | ), 157 | ( 158 | { 159 | "name": 
"20191001-003401_3_test-domain", 160 | "domain_id": 3, 161 | "domain_name": "test-domain", 162 | "version": "0.4.0", 163 | "date": 1569890041, 164 | "packager": { 165 | "type": "tar", 166 | "opts": {}, 167 | }, 168 | }, 169 | { 170 | "name": "20191001-003401_3_test-domain", 171 | "domain_id": 3, 172 | "domain_name": "test-domain", 173 | "version": "0.4.0", 174 | "date": 1569890041, 175 | "packager": { 176 | "type": "tar", 177 | "opts": {}, 178 | }, 179 | }, 180 | ), 181 | ], 182 | ) 183 | def test_convert(definition, expected): 184 | """ 185 | Test conversion from the minimum version supported to the last version supported. 186 | """ 187 | convert(definition) 188 | diff = DeepDiff(definition, expected) 189 | assert not diff, "diff found between converted definition and expected definition" 190 | -------------------------------------------------------------------------------- /tests/test_complete_backup.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import filecmp 3 | import os 4 | import tarfile 5 | import arrow 6 | import pytest 7 | 8 | from virt_backup.backups import build_dom_complete_backup_from_def 9 | from virt_backup.domains import get_domain_disks_of 10 | from virt_backup.exceptions import DomainRunningError 11 | 12 | from helper.virt_backup import build_complete_backup_files_from_domainbackup 13 | 14 | 15 | def transform_dombackup_to_dom_complete_backup(dombkup): 16 | definition = build_complete_backup_files_from_domainbackup(dombkup, arrow.now()) 17 | 18 | return build_dom_complete_backup_from_def(definition, dombkup.backup_dir) 19 | 20 | 21 | @pytest.fixture 22 | def get_uncompressed_complete_backup(get_uncompressed_dombackup, tmpdir): 23 | dombkup = get_uncompressed_dombackup 24 | dombkup.backup_dir = str(tmpdir) 25 | 26 | return transform_dombackup_to_dom_complete_backup(dombkup) 27 | 28 | 29 | @pytest.fixture 30 | def get_compressed_complete_backup(get_compressed_dombackup, tmpdir): 31 | 
dombkup = get_compressed_dombackup 32 | dombkup.backup_dir = str(tmpdir) 33 | 34 | return transform_dombackup_to_dom_complete_backup(dombkup) 35 | 36 | 37 | @pytest.fixture 38 | def build_bak_definition(get_uncompressed_dombackup): 39 | dombkup = get_uncompressed_dombackup 40 | 41 | return get_and_tweak_def_from_dombackup(dombkup) 42 | 43 | 44 | @pytest.fixture 45 | def build_bak_definition_with_compression(get_compressed_dombackup): 46 | dombkup = get_compressed_dombackup 47 | 48 | return get_and_tweak_def_from_dombackup(dombkup) 49 | 50 | 51 | def get_and_tweak_def_from_dombackup(dombkup, date=None): 52 | definition = dombkup.get_definition() 53 | if date is None: 54 | date = datetime.datetime.now() 55 | definition["date"] = date.timestamp() 56 | definition["name"] = dombkup._main_backup_name_format(date) 57 | 58 | return definition 59 | 60 | 61 | def test_get_complete_backup_from_def(build_bak_definition_with_compression): 62 | definition = build_bak_definition_with_compression 63 | complete_backup = build_dom_complete_backup_from_def(definition, backup_dir="./") 64 | 65 | assert complete_backup.dom_xml == definition["domain_xml"] 66 | 67 | 68 | class TestDomCompleteBackup: 69 | def test_cancel(self, get_dombackup): 70 | get_dombackup.cancel() 71 | 72 | assert get_dombackup._cancel_flag.is_set() 73 | 74 | def test_restore_disk_in_domain( 75 | self, get_uncompressed_complete_backup, build_stopped_mock_domain, tmpdir 76 | ): 77 | backup = get_uncompressed_complete_backup 78 | domain = build_stopped_mock_domain 79 | 80 | src_img = backup.get_complete_path_of(backup.disks["vda"]) 81 | domain.set_storage_basedir(str(tmpdir)) 82 | dst_img = get_domain_disks_of(domain.XMLDesc(), "vda")["vda"]["src"] 83 | 84 | backup.restore_and_replace_disk_of("vda", domain, "vda") 85 | 86 | assert filecmp.cmp(src_img, dst_img) 87 | assert ( 88 | get_domain_disks_of(domain.XMLDesc())["vda"]["type"] 89 | == get_domain_disks_of(backup.dom_xml)["vda"]["type"] 90 | ) 91 | 92 | def 
test_restore_disk_in_running_domain( 93 | self, get_uncompressed_complete_backup, build_mock_domain 94 | ): 95 | backup = get_uncompressed_complete_backup 96 | domain = build_mock_domain 97 | 98 | with pytest.raises(DomainRunningError): 99 | backup.restore_and_replace_disk_of("vda", domain, "vda") 100 | 101 | def test_restore_to(self, get_uncompressed_complete_backup, tmpdir): 102 | """ 103 | Test with a not compressed backup 104 | """ 105 | backup = get_uncompressed_complete_backup 106 | target_dir = tmpdir.mkdir("extract") 107 | 108 | return self.restore_to(backup, target_dir) 109 | 110 | def test_restore_to_with_tar(self, get_compressed_complete_backup, tmpdir): 111 | """ 112 | Test with a not compressed backup 113 | """ 114 | backup = get_compressed_complete_backup 115 | target_dir = tmpdir.mkdir("extract") 116 | 117 | return self.restore_to(backup, target_dir) 118 | 119 | def restore_to(self, complete_backup, target): 120 | complete_backup.restore_to(str(target)) 121 | 122 | # there should be 1 .xml file + all disks 123 | assert len(target.listdir()) == 1 + len(complete_backup.disks) 124 | 125 | def test_restore_disk_to_dir(self, get_uncompressed_complete_backup, tmpdir): 126 | backup = get_uncompressed_complete_backup 127 | src_img = backup.get_complete_path_of(backup.disks["vda"]) 128 | extract_dir = tmpdir.mkdir("extract") 129 | dst_img = os.path.join(str(extract_dir), backup.disks["vda"]) 130 | 131 | backup.restore_disk_to("vda", str(extract_dir)) 132 | 133 | assert filecmp.cmp(src_img, dst_img) 134 | 135 | def test_restore_disk_to(self, get_uncompressed_complete_backup, tmpdir): 136 | """ 137 | Test with a not compressed backup 138 | """ 139 | backup = get_uncompressed_complete_backup 140 | src_img = backup.get_complete_path_of(backup.disks["vda"]) 141 | extract_dir = tmpdir.mkdir("extract") 142 | dst_img = os.path.join(str(extract_dir), "vda.img") 143 | 144 | backup.restore_disk_to("vda", dst_img) 145 | 146 | assert filecmp.cmp(src_img, dst_img) 147 | 
148 | def test_restore_replace_domain( 149 | self, get_uncompressed_complete_backup, build_mock_libvirtconn 150 | ): 151 | conn = build_mock_libvirtconn 152 | backup = get_uncompressed_complete_backup 153 | 154 | backup.restore_replace_domain(conn) 155 | 156 | def test_restore_domain_to( 157 | self, get_uncompressed_complete_backup, build_mock_libvirtconn 158 | ): 159 | """ 160 | Test to restore the domain to a specific id 161 | """ 162 | conn = build_mock_libvirtconn 163 | backup = get_uncompressed_complete_backup 164 | 165 | # TODO: check if id of the new domain matches 166 | backup.restore_replace_domain(conn, id=13) 167 | 168 | def test_restore_compressed_disk_to(self, get_compressed_complete_backup, tmpdir): 169 | """ 170 | Test with a compressed backup 171 | """ 172 | backup = get_compressed_complete_backup 173 | extract_dir = tmpdir.mkdir("extract") 174 | dst_img = os.path.join(str(extract_dir), backup.disks["vda"]) 175 | 176 | backup.restore_disk_to("vda", dst_img) 177 | src_img = self.extract_disk_from_backup_packager(backup, "vda") 178 | 179 | assert filecmp.cmp(src_img, dst_img, shallow=False) 180 | 181 | def test_restore_compressed_disk_to_dir( 182 | self, get_compressed_complete_backup, tmpdir 183 | ): 184 | """ 185 | Test with a compressed backup 186 | """ 187 | backup = get_compressed_complete_backup 188 | extract_dir = tmpdir.mkdir("extract") 189 | dst_img = os.path.join(str(extract_dir), backup.disks["vda"]) 190 | 191 | backup.restore_disk_to("vda", str(extract_dir)) 192 | src_img = self.extract_disk_from_backup_packager(backup, "vda") 193 | 194 | assert filecmp.cmp(src_img, dst_img, shallow=False) 195 | 196 | def extract_disk_from_backup_packager(self, backup, disk): 197 | packager = backup._get_packager() 198 | dest = backup.get_complete_path_of(backup.disks[disk]) 199 | with packager: 200 | packager.restore(backup.disks[disk], dest) 201 | 202 | return dest 203 | 204 | def test_get_complete_backup_from_def(self, get_uncompressed_complete_backup): 
205 | backup = get_uncompressed_complete_backup 206 | 207 | complete_path_of_vda = backup.get_complete_path_of(backup.disks["vda"]) 208 | expected_path = os.path.join(backup.backup_dir, backup.disks["vda"]) 209 | 210 | assert complete_path_of_vda == expected_path 211 | 212 | def test_delete(self, get_uncompressed_complete_backup): 213 | backup = get_uncompressed_complete_backup 214 | backup.delete() 215 | 216 | assert not os.path.exists(backup.backup_dir) 217 | -------------------------------------------------------------------------------- /tests/test_complete_group.py: -------------------------------------------------------------------------------- 1 | import arrow 2 | import json 3 | import os 4 | import pytest 5 | 6 | from virt_backup.groups import CompleteBackupGroup, complete_groups_from_dict 7 | from virt_backup.groups.complete import list_backups_by_domain 8 | from virt_backup.backups import DomBackup, DomExtSnapshotCallbackRegistrer 9 | from virt_backup.exceptions import BackupNotFoundError 10 | 11 | 12 | class TestCompleteBackupGroup: 13 | def test_scan_backup_dir(self, build_backup_directory): 14 | backup_dir = str(build_backup_directory["backup_dir"]) 15 | backups_def = list_backups_by_domain(str(backup_dir)) 16 | 17 | group = CompleteBackupGroup(name="test", backup_dir=backup_dir, hosts=("r:.*",)) 18 | group.scan_backup_dir() 19 | 20 | assert sorted(group.backups.keys()) == sorted(backups_def.keys()) 21 | for dom in group.backups: 22 | assert len(group.backups[dom]) == len(backups_def[dom]) 23 | 24 | def test_scan_backup_dir_without_host(self, build_backup_directory): 25 | backup_dir = str(build_backup_directory["backup_dir"]) 26 | 27 | group = CompleteBackupGroup(name="test", backup_dir=backup_dir, hosts=tuple()) 28 | group.scan_backup_dir() 29 | 30 | assert not group.backups.keys() 31 | 32 | def test_scan_backup_dir_several_patterns(self, build_backup_directory): 33 | backup_dir = str(build_backup_directory["backup_dir"]) 34 | backups_def = 
list_backups_by_domain(str(backup_dir)) 35 | 36 | # g: should do nothing for now, but test if passing 37 | group = CompleteBackupGroup( 38 | name="test", backup_dir=backup_dir, hosts=("a", "r:^[b-z].*", "g:all") 39 | ) 40 | group.scan_backup_dir() 41 | 42 | assert group.backups 43 | assert sorted(group.backups.keys()) == sorted(backups_def.keys()) 44 | for dom in group.backups: 45 | assert len(group.backups[dom]) == len(backups_def[dom]) 46 | 47 | def test_get_backup_at_date(self, build_backup_directory): 48 | group = self.prepare_get_backup_at_date(build_backup_directory) 49 | 50 | domain_name = next(iter(group.backups.keys())) 51 | testing_date = arrow.get("2016-07-08 17:40:02") 52 | 53 | backup = group.get_backup_at_date(domain_name, testing_date) 54 | assert backup.date == testing_date 55 | 56 | def test_get_backup_at_date_unexisting(self, build_backup_directory): 57 | group = self.prepare_get_backup_at_date(build_backup_directory) 58 | 59 | domain_name = next(iter(group.backups.keys())) 60 | testing_date = arrow.get("2016-07-09 17:40:02") 61 | 62 | with pytest.raises(BackupNotFoundError): 63 | group.get_backup_at_date(domain_name, testing_date) 64 | 65 | def prepare_get_backup_at_date(self, build_backup_directory): 66 | backup_dir = str(build_backup_directory["backup_dir"]) 67 | group = CompleteBackupGroup(name="test", backup_dir=backup_dir, hosts=["r:.*"]) 68 | group.scan_backup_dir() 69 | 70 | return group 71 | 72 | def test_get_nearest_backup_of(self, build_backup_directory): 73 | backup_dir = str(build_backup_directory["backup_dir"]) 74 | group = CompleteBackupGroup(name="test", backup_dir=backup_dir, hosts=["r:.*"]) 75 | group.scan_backup_dir() 76 | 77 | domain_name = next(iter(group.backups.keys())) 78 | testing_date = arrow.get("2015") 79 | nearest_backup = group.get_n_nearest_backup(domain_name, testing_date, 1)[0] 80 | 81 | difference = abs(testing_date - nearest_backup.date) 82 | for b in group.backups[domain_name]: 83 | assert abs(testing_date - 
b.date) >= difference 84 | 85 | def test_clean(self, build_backup_directory): 86 | backup_dir = str(build_backup_directory["backup_dir"]) 87 | group = CompleteBackupGroup(name="test", backup_dir=backup_dir, hosts=["r:.*"]) 88 | group.scan_backup_dir() 89 | nb_initial_backups = sum(len(b) for b in group.backups.values()) 90 | 91 | cleaned = group.clean(hourly=2, daily=3, weekly=1, monthly=1, yearly=2) 92 | backups_def = list_backups_by_domain(str(backup_dir)) 93 | expected_dates = sorted( 94 | ( 95 | arrow.get("2016-07-08 19:40:02").to("local"), 96 | arrow.get("2016-07-08 18:30:02").to("local"), 97 | arrow.get("2016-07-08 17:40:02").to("local"), 98 | arrow.get("2016-07-07 19:40:02").to("local"), 99 | arrow.get("2016-07-06 20:40:02").to("local"), 100 | arrow.get("2016-03-08 14:28:13").to("local"), 101 | arrow.get("2014-05-01 00:30:00").to("local"), 102 | ) 103 | ) 104 | 105 | for domain, backups in group.backups.items(): 106 | dates = sorted(b.date for b in backups) 107 | assert dates == expected_dates 108 | assert len(backups_def[domain]) == len(backups) 109 | 110 | nb_remaining_backups = sum(len(b) for b in group.backups.values()) 111 | assert len(cleaned) == nb_initial_backups - nb_remaining_backups 112 | 113 | def test_clean_unset_period(self, build_backup_directory): 114 | """ 115 | Test if cleaning works if some periods are not set. 
116 | 117 | Related to issue #27 118 | """ 119 | backup_dir = str(build_backup_directory["backup_dir"]) 120 | group = CompleteBackupGroup(name="test", backup_dir=backup_dir, hosts=["r:.*"]) 121 | group.scan_backup_dir() 122 | 123 | group.clean(daily=3, monthly=1, yearly=2) 124 | expected_dates = sorted( 125 | ( 126 | arrow.get("2014-05-01 00:30:00").to("local"), 127 | arrow.get("2016-03-08 14:28:13").to("local"), 128 | arrow.get("2016-04-08 19:40:02").to("local"), 129 | arrow.get("2016-07-06 20:40:02").to("local"), 130 | arrow.get("2016-07-07 19:40:02").to("local"), 131 | arrow.get("2016-07-07 21:40:02").to("local"), 132 | arrow.get("2016-07-08 17:40:02").to("local"), 133 | arrow.get("2016-07-08 18:30:02").to("local"), 134 | arrow.get("2016-07-08 19:40:02").to("local"), 135 | ) 136 | ) 137 | 138 | for domain, backups in group.backups.items(): 139 | dates = sorted(b.date for b in backups) 140 | assert dates == expected_dates 141 | 142 | def test_clean_broken( 143 | self, build_backup_directory, build_mock_domain, build_mock_libvirtconn, mocker 144 | ): 145 | build_mock_libvirtconn._domains.append(build_mock_domain) 146 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(build_mock_libvirtconn) 147 | backup_dir = build_backup_directory["backup_dir"] 148 | group = CompleteBackupGroup( 149 | name="test", 150 | backup_dir=str(backup_dir), 151 | hosts=["r:.*"], 152 | conn=build_mock_libvirtconn, 153 | callbacks_registrer=callbacks_registrer, 154 | ) 155 | 156 | dombkup = DomBackup( 157 | dom=build_mock_domain, 158 | backup_dir=str(backup_dir.mkdir(build_mock_domain.name())), 159 | callbacks_registrer=callbacks_registrer, 160 | ) 161 | dombkup.pending_info = dombkup.get_definition() 162 | dombkup.pending_info["domain_name"] = build_mock_domain.name() 163 | dombkup.pending_info["date"] = 0 164 | dombkup.pending_info["disks"] = {} 165 | dombkup.pending_info["name"] = "test" 166 | dombkup.pending_info["packager"] = {"type": "directory", "opts": {}} 167 | 
dombkup._dump_pending_info() 168 | 169 | group.scan_backup_dir() 170 | nb_initial_backups = sum(len(b) for b in group.broken_backups.values()) 171 | assert nb_initial_backups == 1 172 | 173 | broken_backup = group.broken_backups[build_mock_domain.name()][0] 174 | mocker.spy(broken_backup, "clean_aborted") 175 | 176 | group.clean_broken_backups() 177 | assert not group.broken_backups[build_mock_domain.name()] 178 | assert broken_backup.clean_aborted.called 179 | 180 | 181 | def test_complete_groups_from_dict(): 182 | """ 183 | Test groups_from_dict with only one group 184 | """ 185 | groups_config = { 186 | "test": { 187 | "target": "/mnt/test", 188 | "compression": "tar", 189 | "hosts": [ 190 | {"host": r"r:^matching\d?$", "disks": ["vda", "vdb"]}, 191 | "!matching2", 192 | "nonexisting", 193 | ], 194 | }, 195 | } 196 | 197 | groups = tuple(complete_groups_from_dict(groups_config)) 198 | assert len(groups) == 1 199 | test_group = groups[0] 200 | 201 | assert test_group.name == "test" 202 | assert test_group.backup_dir == "/mnt/test" 203 | assert test_group.hosts == [r"r:^matching\d?$", r"!matching2", r"nonexisting"] 204 | 205 | 206 | def test_complete_groups_from_dict_multiple_groups(): 207 | """ 208 | Test match_domains_from_config with a str pattern 209 | """ 210 | groups_config = { 211 | "test0": { 212 | "target": "/mnt/test0", 213 | "compression": "tar", 214 | "hosts": [ 215 | "matching2", 216 | ], 217 | }, 218 | "test1": { 219 | "target": "/mnt/test1", 220 | "hosts": ["matching", "a"], 221 | }, 222 | } 223 | 224 | groups = tuple(complete_groups_from_dict(groups_config)) 225 | assert len(groups) == 2 226 | group0, group1 = groups 227 | 228 | assert sorted((group0.name, group1.name)) == ["test0", "test1"] 229 | 230 | 231 | def test_list_backups_by_domain(build_backup_directory): 232 | backup_dir = str(build_backup_directory["backup_dir"]) 233 | backup_dates = tuple(build_backup_directory["backup_dates"]) 234 | domain_names = 
build_backup_directory["domain_names"] 235 | 236 | backups = list_backups_by_domain(str(backup_dir)) 237 | assert sorted(backups.keys()) == sorted(domain_names) 238 | 239 | def expected_backups(domain_id, domain_name): 240 | for backup_date in backup_dates: 241 | str_backup_date = backup_date.strftime("%Y%m%d-%H%M%S") 242 | json_filename = "{}_{}_{}.json".format( 243 | str_backup_date, domain_id, domain_name 244 | ) 245 | json_path = os.path.join(backup_dir, domain_name, json_filename) 246 | 247 | assert os.path.isfile(json_path) 248 | with open(json_path, "r") as json_file: 249 | yield (json_path, json.load(json_file)) 250 | 251 | for domain_id, domain_name in enumerate(domain_names): 252 | assert sorted(expected_backups(domain_id, domain_name)) == sorted( 253 | backups[domain_name] 254 | ) 255 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import os 4 | import yaml 5 | 6 | from virt_backup import config 7 | from virt_backup.config import get_config, Config 8 | 9 | CUR_PATH = os.path.dirname(os.path.realpath(__file__)) 10 | TESTCONF_PATH = os.path.join(CUR_PATH, "testconfig", "config.yml") 11 | 12 | 13 | @pytest.fixture 14 | def get_testing_config(): 15 | return get_config(custom_path=TESTCONF_PATH) 16 | 17 | 18 | def test_get_config(): 19 | config.CONFIG_DIRS = (os.path.join(CUR_PATH, "testconfig"),) + config.CONFIG_DIRS 20 | conf = get_config() 21 | with open(os.path.join(config.CONFIG_DIRS[0], "config.yml"), "r") as f: 22 | expected_conf = yaml.safe_load(f) 23 | 24 | assert conf == expected_conf 25 | 26 | 27 | def test_get_config_custom_path(get_testing_config): 28 | # get_config already uses a custom path, so uses it 29 | conf = get_testing_config 30 | with open(TESTCONF_PATH, "r") as f: 31 | expected_conf = yaml.safe_load(f) 32 | 33 | assert conf == expected_conf 34 | 35 | 36 | def 
test_get_config_not_existing(tmpdir): 37 | backup_dir = tmpdir.mkdir("no_config") 38 | testconf_path = str(backup_dir.join("config.yml")) 39 | 40 | with pytest.raises(FileNotFoundError): 41 | get_config(custom_path=testconf_path) 42 | 43 | 44 | class TestConfig: 45 | def test_config(self): 46 | Config() 47 | 48 | def test_with_default_config(self): 49 | conf = Config(defaults={"debug": True}) 50 | assert conf["debug"] 51 | 52 | def test_from_dict(self, get_testing_config): 53 | conf = Config() 54 | conf.from_dict(get_testing_config) 55 | 56 | assert sorted(conf.items()) == sorted(get_testing_config.items()) 57 | 58 | def test_from_yaml(self, get_testing_config): 59 | conf = Config() 60 | conf.from_yaml(TESTCONF_PATH) 61 | 62 | assert sorted(conf.items()) == sorted(get_testing_config.items()) 63 | 64 | def test_from_str(self, get_testing_config): 65 | conf = Config() 66 | with open(TESTCONF_PATH, "r") as conf_file: 67 | conf.from_str(conf_file.read()) 68 | 69 | assert sorted(conf.items()) == sorted(get_testing_config.items()) 70 | 71 | def test_get_groups(self, get_testing_config): 72 | conf = Config() 73 | 74 | conf["default"] = {"daily": 4} 75 | conf["groups"] = { 76 | "test_group": { 77 | "daily": 3, 78 | }, 79 | } 80 | 81 | groups = conf.get_groups() 82 | assert groups["test_group"]["daily"] == 3 83 | -------------------------------------------------------------------------------- /tests/test_domain.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from virt_backup.domains import ( 4 | search_domains_regex, 5 | get_domain_disks_of, 6 | get_domain_incompatible_disks_of, 7 | ) 8 | from virt_backup.exceptions import DiskNotFoundError 9 | 10 | from helper.virt_backup import MockDomain 11 | 12 | 13 | def test_get_domain_disks_of(build_mock_domain): 14 | domain = build_mock_domain 15 | vda = get_domain_disks_of(domain.XMLDesc(), "vda", "vdb") 16 | 17 | assert "vda" in vda 18 | 19 | 20 | def 
test_get_domain_incompatible_disks_of(build_mock_domain): 21 | domain = build_mock_domain 22 | disks = get_domain_incompatible_disks_of(domain.XMLDesc()) 23 | 24 | assert disks == ("vdz",) 25 | 26 | 27 | def test_get_domain_disks_of_disk_not_found(build_mock_domain): 28 | domain = build_mock_domain 29 | with pytest.raises(DiskNotFoundError): 30 | get_domain_disks_of(domain.XMLDesc(), "vda", "vdc") 31 | 32 | 33 | def test_search_domains_regex(build_mock_libvirtconn): 34 | conn = build_mock_libvirtconn 35 | domain_names = ("dom1", "dom2", "dom3", "test") 36 | conn._domains = [MockDomain(name=dom_name, _conn=conn) for dom_name in domain_names] 37 | 38 | matches = list(sorted(search_domains_regex(r"^dom\d$", conn))) 39 | expected = list(sorted(domain_names)) 40 | expected.remove("test") 41 | 42 | assert matches == expected 43 | 44 | 45 | def test_search_domains_regex_not_found(build_mock_libvirtconn, build_mock_domain): 46 | """ 47 | Search a non existing domain 48 | """ 49 | conn = build_mock_libvirtconn 50 | conn._domains = [build_mock_domain] 51 | 52 | matches = list(search_domains_regex("^dom$", conn)) 53 | assert matches == [] 54 | -------------------------------------------------------------------------------- /tests/test_packagers.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | import os 3 | import random 4 | import threading 5 | import pytest 6 | 7 | from virt_backup.exceptions import CancelledError, ImageNotFoundError 8 | from virt_backup.backups.packagers import ReadBackupPackagers, WriteBackupPackagers 9 | 10 | 11 | @pytest.fixture() 12 | def new_image(tmpdir, name="test", content=None): 13 | image = tmpdir.join(name) 14 | if content is None: 15 | # Generate a content of around 5MB. 
16 | content = "{:016d}".format(random.randrange(16)) * int(5 * 2**20 / 16) 17 | image.write(content) 18 | return image 19 | 20 | 21 | @pytest.fixture() 22 | def cancel_flag(): 23 | return threading.Event() 24 | 25 | 26 | class _BaseTestBackupPackager(ABC): 27 | def test_add(self, write_packager, new_image): 28 | with write_packager: 29 | write_packager.add(str(new_image)) 30 | assert new_image.basename in write_packager.list() 31 | 32 | def test_add_custom_name(self, write_packager, new_image): 33 | name = "another_test" 34 | 35 | with write_packager: 36 | write_packager.add(str(new_image), name=name) 37 | assert name in write_packager.list() 38 | 39 | def test_add_cancelled(self, write_packager, new_image, cancel_flag): 40 | with write_packager: 41 | cancel_flag.set() 42 | with pytest.raises(CancelledError): 43 | write_packager.add(str(new_image), stop_event=cancel_flag) 44 | 45 | def test_restore(self, tmpdir, write_packager, read_packager, new_image): 46 | name = new_image.basename 47 | 48 | with write_packager: 49 | write_packager.add(str(new_image)) 50 | with read_packager: 51 | tmpdir = tmpdir.mkdir("extract") 52 | read_packager.restore(name, str(tmpdir)) 53 | 54 | extracted_image = tmpdir.join(name) 55 | assert extracted_image.check() 56 | assert tmpdir.join(name).read() == new_image.read() 57 | 58 | def test_restore_unexisting(self, tmpdir, write_packager, read_packager): 59 | with write_packager: 60 | pass 61 | with read_packager: 62 | tmpdir = tmpdir.mkdir("extract") 63 | with pytest.raises(ImageNotFoundError): 64 | read_packager.restore("test", str(tmpdir)) 65 | 66 | def test_restore_cancelled( 67 | self, tmpdir, write_packager, read_packager, new_image, cancel_flag 68 | ): 69 | name = new_image.basename 70 | 71 | with write_packager: 72 | write_packager.add(str(new_image)) 73 | with read_packager: 74 | tmpdir = tmpdir.mkdir("extract") 75 | cancel_flag.set() 76 | with pytest.raises(CancelledError): 77 | read_packager.restore(name, str(tmpdir), 
stop_event=cancel_flag) 78 | 79 | def test_remove_package(self, write_packager): 80 | with write_packager: 81 | pass 82 | write_packager.remove_package() 83 | assert not os.path.exists(write_packager.complete_path) 84 | 85 | def test_remove_package_cancelled(self, write_packager, cancel_flag): 86 | with write_packager: 87 | pass 88 | cancel_flag.set() 89 | with pytest.raises(CancelledError): 90 | write_packager.remove_package() 91 | 92 | 93 | class TestBackupPackagerDir(_BaseTestBackupPackager): 94 | @pytest.fixture() 95 | def read_packager(self, tmpdir): 96 | return ReadBackupPackagers.directory.value("test", str(tmpdir.join("packager"))) 97 | 98 | @pytest.fixture() 99 | def write_packager(self, tmpdir): 100 | return WriteBackupPackagers.directory.value( 101 | "test", str(tmpdir.join("packager")) 102 | ) 103 | 104 | def test_remove(self, tmpdir, write_packager, new_image): 105 | name = new_image.basename 106 | 107 | with write_packager: 108 | write_packager.add(str(new_image)) 109 | write_packager.remove(name) 110 | assert not write_packager.list() 111 | 112 | def test_remove_package_cancelled(self, write_packager, cancel_flag): 113 | """ 114 | Atomic for the directory package, so cancel it will not fail. 115 | """ 116 | with write_packager: 117 | pass 118 | cancel_flag.set() 119 | write_packager.remove_package() 120 | assert not os.path.exists(write_packager.complete_path) 121 | 122 | 123 | class TestBackupPackagerTar(_BaseTestBackupPackager): 124 | @pytest.fixture() 125 | def read_packager(self, tmpdir): 126 | return ReadBackupPackagers.tar.value( 127 | "test", str(tmpdir.join("packager")), "test_package.tar" 128 | ) 129 | 130 | @pytest.fixture() 131 | def write_packager(self, tmpdir): 132 | return WriteBackupPackagers.tar.value( 133 | "test", str(tmpdir.join("packager")), "test_package.tar" 134 | ) 135 | 136 | def test_remove_package_cancelled(self, write_packager, cancel_flag): 137 | """ 138 | Atomic for the tar package, so cancel it will not fail. 
139 | """ 140 | with write_packager: 141 | pass 142 | cancel_flag.set() 143 | write_packager.remove_package() 144 | assert not os.path.exists(write_packager.complete_path) 145 | 146 | 147 | @pytest.mark.extra 148 | class TestBackupPackagerZSTD(_BaseTestBackupPackager): 149 | @pytest.fixture() 150 | def read_packager(self, tmpdir): 151 | return ReadBackupPackagers.zstd.value( 152 | "test", str(tmpdir.join("packager")), "test_package" 153 | ) 154 | 155 | @pytest.fixture() 156 | def write_packager(self, tmpdir): 157 | return WriteBackupPackagers.zstd.value( 158 | "test", str(tmpdir.join("packager")), "test_package" 159 | ) 160 | 161 | def test_remove_package(self, write_packager, new_image): 162 | with write_packager: 163 | write_packager.add(str(new_image), name="another_test") 164 | backups = write_packager.list() 165 | 166 | # Try to create a .zst file in the same directory, to check #29. 167 | other_file = os.path.join(write_packager.complete_path, "test.zst") 168 | with open(other_file, "w") as f: 169 | f.write("") 170 | 171 | write_packager.remove_package() 172 | 173 | for b in backups: 174 | assert not os.path.exists(write_packager.archive_path(b)) 175 | 176 | # Checks that remove_package only removed the wanted backups. 177 | assert os.path.exists(other_file) 178 | 179 | def test_remove_package_cancelled(self, write_packager, new_image, cancel_flag): 180 | with write_packager: 181 | write_packager.add(str(new_image), name="another_test") 182 | 183 | # Try to create a .zst file in the same directory, to check #29. 
184 | other_file = os.path.join(write_packager.complete_path, "test.zst") 185 | with open(other_file, "w") as f: 186 | f.write("") 187 | 188 | cancel_flag.set() 189 | with pytest.raises(CancelledError): 190 | write_packager.remove_package(cancel_flag) 191 | -------------------------------------------------------------------------------- /tests/test_snapshot.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import arrow 4 | import libvirt 5 | import pytest 6 | 7 | from virt_backup.backups import DomBackup 8 | from virt_backup.domains import get_xml_block_of_disk 9 | from virt_backup.backups.snapshot import DomExtSnapshot, DomExtSnapshotCallbackRegistrer 10 | from virt_backup.exceptions import DiskNotFoundError, SnapshotNotStarted 11 | from helper.virt_backup import MockSnapshot 12 | 13 | 14 | class TestDomExtSnapshot: 15 | snapshot_helper = None 16 | 17 | @pytest.fixture(autouse=True) 18 | def gen_snapshot_helper(self, build_mock_domain): 19 | dom = build_mock_domain 20 | callbacks_registrer = DomExtSnapshotCallbackRegistrer(dom._conn) 21 | self.snapshot_helper = DomExtSnapshot( 22 | dom=dom, 23 | callbacks_registrer=callbacks_registrer, 24 | disks={ 25 | "vda": {"src": "/vda.qcow2", "type": "qcow2"}, 26 | "vdb": {"src": "/vdb.qcow2", "type": "qcow2"}, 27 | }, 28 | ) 29 | 30 | def test_snapshot_logic_date(self, monkeypatch): 31 | """ 32 | Create a DomBackup and test to add vdc 33 | """ 34 | pre_snap_date = arrow.now() 35 | metadatas = self.start_snapshot(monkeypatch) 36 | post_snap_date = arrow.now() 37 | 38 | snapshot_date = metadatas["date"] 39 | 40 | assert snapshot_date >= pre_snap_date 41 | assert snapshot_date <= post_snap_date 42 | 43 | def test_snapshot_disks_infos(self, monkeypatch): 44 | """ 45 | Check if metadatas contains the necessary infos 46 | """ 47 | metadatas = self.start_snapshot(monkeypatch) 48 | 49 | assert len(self.snapshot_helper.disks) == len(metadatas["disks"]) 50 | for 
disk in self.snapshot_helper.disks: 51 | assert sorted(("snapshot", "src", "type")) == sorted( 52 | metadatas["disks"][disk].keys() 53 | ) 54 | 55 | def test_snapshot_correct_snapshot_path(self, monkeypatch): 56 | """ 57 | Check if the snapshot is done is the same path as its source disk 58 | """ 59 | metadatas = self.start_snapshot(monkeypatch) 60 | 61 | for disk in metadatas["disks"].values(): 62 | assert os.path.dirname(disk["src"]) == os.path.dirname(disk["snapshot"]) 63 | 64 | def start_snapshot(self, monkeypatch): 65 | monkeypatch.setattr( 66 | self.snapshot_helper, "external_snapshot", lambda: MockSnapshot("123") 67 | ) 68 | 69 | return self.snapshot_helper.start() 70 | 71 | def test_external_snapshot(self): 72 | snap = self.snapshot_helper.external_snapshot() 73 | assert isinstance(snap, MockSnapshot) 74 | 75 | def test_external_snapshot_quiesce_fallback(self): 76 | tried = {"quiesce": False} 77 | 78 | def mock_quiesce_failure(_, flags): 79 | if (flags & libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) != 0: 80 | tried["quiesce"] = True 81 | raise libvirt.libvirtError("quiesce error") 82 | 83 | return MockSnapshot("123") 84 | 85 | self.snapshot_helper.dom.set_mock_snapshot_create(mock_quiesce_failure) 86 | self.snapshot_helper.quiesce = True 87 | 88 | snap = self.snapshot_helper.external_snapshot() 89 | assert tried["quiesce"] 90 | assert isinstance(snap, MockSnapshot) 91 | 92 | def test_get_snapshot_flags(self): 93 | flags = self.snapshot_helper._get_snapshot_flags() 94 | assert flags == ( 95 | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY 96 | + libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC 97 | + libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA 98 | ) 99 | 100 | def test_get_snapshot_flags_quiesce(self): 101 | flags = self.snapshot_helper._get_snapshot_flags(quiesce=True) 102 | assert (flags & libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) != 0 103 | 104 | def test_gen_libvirt_snapshot_xml(self): 105 | expected_xml = ( 106 | "\n" 107 | " Pre-backup external snapshot\n" 
108 | " \n" 109 | ' \n' 110 | ' \n' 111 | ' \n' 112 | " \n" 113 | "\n" 114 | ) 115 | assert self.snapshot_helper.gen_libvirt_snapshot_xml() == expected_xml 116 | 117 | def test_gen_libvirt_snapshot_xml_ignored_disk(self): 118 | self.snapshot_helper.disks.pop("vdb") 119 | expected_xml = ( 120 | "\n" 121 | " Pre-backup external snapshot\n" 122 | " \n" 123 | ' \n' 124 | ' \n' 125 | ' \n' 126 | " \n" 127 | "\n" 128 | ) 129 | assert self.snapshot_helper.gen_libvirt_snapshot_xml() == expected_xml 130 | 131 | def test_manually_pivot_disk(self, build_mock_libvirtconn): 132 | self.snapshot_helper.conn = build_mock_libvirtconn 133 | self.snapshot_helper._manually_pivot_disk("vda", "/testvda", "qcow2") 134 | dom_xml = self.snapshot_helper.dom.XMLDesc() 135 | assert self.get_src_for_disk(dom_xml, "vda") == "/testvda" 136 | 137 | def get_src_for_disk(self, dom_xml, disk): 138 | elem = get_xml_block_of_disk(dom_xml, disk) 139 | return elem.xpath("source")[0].get("file") 140 | 141 | def test_manually_pivot_disk_libvirt_2(self, build_mock_libvirtconn): 142 | """ 143 | Test manual pivot with libvirt < 3.0 144 | """ 145 | conn = build_mock_libvirtconn 146 | conn._libvirt_version = 2000000 147 | conn._domains.append(self.snapshot_helper.dom) 148 | 149 | return self.test_manually_pivot_disk(conn) 150 | 151 | def test_manually_pivot_unexistant_disk(self): 152 | with pytest.raises(DiskNotFoundError): 153 | self.snapshot_helper._manually_pivot_disk("sda", "/testvda", "qcow2") 154 | 155 | def test_clean_no_metadata(self): 156 | with pytest.raises(SnapshotNotStarted): 157 | self.snapshot_helper.clean() 158 | 159 | def test_clean(self, monkeypatch, tmpdir): 160 | snapdir = self.prepare_test_clean(monkeypatch, tmpdir) 161 | self.snapshot_helper.clean() 162 | 163 | assert len(snapdir.listdir()) == 0 164 | 165 | def prepare_test_clean(self, monkeypatch, tmpdir): 166 | snapshots = self.create_temp_snapshot_files(tmpdir) 167 | 168 | self.mock_pivot_mechanism(monkeypatch) 169 | # set the domain 
unactive to avoid the blockcommit 170 | self.snapshot_helper.dom.set_state(0, 0) 171 | 172 | self.snapshot_helper.metadatas = { 173 | "date": arrow.now(), 174 | "disks": { 175 | disk: {"src": prop["src"], "snapshot": snapshots[disk], "type": "qcow2"} 176 | for disk, prop in self.snapshot_helper.disks.items() 177 | }, 178 | } 179 | return tmpdir.join("snaps") 180 | 181 | def create_temp_snapshot_files(self, tmpdir): 182 | tmpdir = tmpdir.mkdir("snaps") 183 | self.snapshot_helper.dom.set_storage_basedir(os.path.abspath(str(tmpdir))) 184 | 185 | snapshots = {} 186 | # swap disk and snapshots, to just change the domain basedir 187 | for disk, prop in self.snapshot_helper.disks.items(): 188 | dom_disk_path = ( 189 | (get_xml_block_of_disk(self.snapshot_helper.dom.XMLDesc(), disk)) 190 | .xpath("source")[0] 191 | .get("file") 192 | ) 193 | tmpdir.join(os.path.basename(dom_disk_path)).write("") 194 | prop["snapshot"] = dom_disk_path 195 | 196 | disk_path = tmpdir.join("{}.qcow2.{}".format(disk, "123")) 197 | prop["src"] = str(disk_path) 198 | snapshots[disk] = prop["snapshot"] 199 | 200 | return snapshots 201 | 202 | def mock_pivot_mechanism(self, monkeypatch): 203 | monkeypatch.setattr( 204 | self.snapshot_helper, "_qemu_img_commit", lambda *args: None 205 | ) 206 | 207 | monkeypatch.setattr( 208 | self.snapshot_helper, "_manually_pivot_disk", lambda *args: None 209 | ) 210 | -------------------------------------------------------------------------------- /tests/test_unsupported.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from virt_backup.exceptions import UnsupportedPackagerError 4 | from virt_backup.backups.packagers import ReadBackupPackagers, WriteBackupPackagers 5 | from virt_backup.backups.packagers.unsupported import ( 6 | UnsupportedReadBackupPackagerZSTD, 7 | UnsupportedWriteBackupPackagerZSTD, 8 | ) 9 | 10 | 11 | @pytest.mark.no_extra 12 | @pytest.mark.no_zstd 13 | class TestUnsupportedZSTD: 14 
| def test_zstd_unsupported(self): 15 | assert ReadBackupPackagers.zstd.value == UnsupportedReadBackupPackagerZSTD 16 | assert WriteBackupPackagers.zstd.value == UnsupportedWriteBackupPackagerZSTD 17 | 18 | def test_unsupported_error(self): 19 | packagers = (ReadBackupPackagers.zstd.value, WriteBackupPackagers.zstd.value) 20 | for packager in packagers: 21 | with pytest.raises(UnsupportedPackagerError): 22 | packager() 23 | -------------------------------------------------------------------------------- /tests/testconfig/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | debug: true 4 | uri: "qemu:///system" 5 | # username: 6 | # passphrase: 7 | 8 | default: 9 | daily: 4 10 | weekly: 2 11 | monthly: 5 12 | yearly: 1 13 | 14 | 15 | groups: 16 | test: 17 | target: /mnt/kvm/backups 18 | packager: tar 19 | packager_opts: 20 | compression: xz 21 | 22 | autostart: true 23 | daily: 3 24 | weekly: 2 25 | monthly: 5 26 | 27 | hosts: 28 | - host: domainname 29 | disks: 30 | - vda 31 | - vdb 32 | - domainname2 33 | - "r:.*" 34 | - "!domainname3" 35 | - "!g:stable" 36 | - "r:^dom.*" 37 | 38 | # vim: set ts=2 sw=2: 39 | -------------------------------------------------------------------------------- /tests/testconfig/versions/0.4/post.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | debug: False 4 | threads: 1 5 | 6 | uri: "qemu:///system" 7 | username: 8 | passphrase: 9 | 10 | default: 11 | daily: 4 12 | weekly: 2 13 | monthly: 5 14 | yearly: 1 15 | 16 | groups: 17 | test: 18 | target: /mnt/kvm/backups 19 | packager: tar 20 | packager_opts: 21 | compression: xz 22 | compression_lvl: 6 23 | autostart: True 24 | 25 | hourly: 1 26 | daily: 3 27 | weekly: 2 28 | monthly: 5 29 | yearly: 1 30 | 31 | hosts: 32 | - host: domainname 33 | disks: 34 | - vda 35 | - vdb 36 | - domainname2 37 | - "r:^prod.*" 38 | - "!domainname3" 39 | - "!r:^test.*" 40 | 41 | test_dir: 42 | 
packager: directory 43 | target: /mnt/kvm/backups 44 | -------------------------------------------------------------------------------- /tests/testconfig/versions/0.4/pre.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | debug: False 4 | threads: 1 5 | 6 | uri: "qemu:///system" 7 | username: 8 | passphrase: 9 | 10 | default: 11 | daily: 4 12 | weekly: 2 13 | monthly: 5 14 | yearly: 1 15 | 16 | groups: 17 | test: 18 | target: /mnt/kvm/backups 19 | compression: xz 20 | compression_lvl: 6 21 | autostart: True 22 | 23 | hourly: 1 24 | daily: 3 25 | weekly: 2 26 | monthly: 5 27 | yearly: 1 28 | 29 | hosts: 30 | - host: domainname 31 | disks: 32 | - vda 33 | - vdb 34 | - domainname2 35 | - "r:^prod.*" 36 | - "!domainname3" 37 | - "!r:^test.*" 38 | 39 | test_dir: 40 | compression: 41 | target: /mnt/kvm/backups 42 | -------------------------------------------------------------------------------- /tests/testconfig/versions/full/0.1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | debug: False 4 | threads: 1 5 | 6 | uri: "qemu:///system" 7 | username: 8 | passphrase: 9 | 10 | default: 11 | daily: 4 12 | weekly: 2 13 | monthly: 5 14 | yearly: 1 15 | 16 | groups: 17 | test: 18 | target: /mnt/kvm/backups 19 | compression: xz 20 | compression_lvl: 6 21 | autostart: True 22 | 23 | hourly: 1 24 | daily: 3 25 | weekly: 2 26 | monthly: 5 27 | yearly: 1 28 | 29 | hosts: 30 | - host: domainname 31 | disks: 32 | - vda 33 | - vdb 34 | - domainname2 35 | - "r:^prod.*" 36 | - "!domainname3" 37 | - "!r:^test.*" 38 | 39 | test_dir: 40 | compression: 41 | target: /mnt/kvm/backups 42 | -------------------------------------------------------------------------------- /tests/testconfig/versions/full/0.4.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | debug: False 4 | threads: 1 5 | 6 | uri: "qemu:///system" 7 | username: 8 | passphrase: 9 
| 10 | default: 11 | daily: 4 12 | weekly: 2 13 | monthly: 5 14 | yearly: 1 15 | 16 | groups: 17 | test: 18 | target: /mnt/kvm/backups 19 | packager: tar 20 | packager_opts: 21 | compression: xz 22 | compression_lvl: 6 23 | autostart: True 24 | 25 | hourly: 1 26 | daily: 3 27 | weekly: 2 28 | monthly: 5 29 | yearly: 1 30 | 31 | hosts: 32 | - host: domainname 33 | disks: 34 | - vda 35 | - vdb 36 | - domainname2 37 | - "r:^prod.*" 38 | - "!domainname3" 39 | - "!r:^test.*" 40 | 41 | test_dir: 42 | packager: directory 43 | target: /mnt/kvm/backups 44 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [testenv] 2 | deps = 3 | apipkg 4 | libvirt-python 5 | pytest 6 | pytest-mock 7 | 8 | [testenv:full] 9 | deps = 10 | apipkg 11 | libvirt-python 12 | pytest 13 | pytest-mock 14 | 15 | commands= python setup.py test 16 | 17 | [testenv:min] 18 | deps = 19 | apipkg 20 | libvirt-python 21 | pytest 22 | pytest-mock 23 | 24 | commands= python setup.py testmin 25 | 26 | 27 | [testenv:black] 28 | usedevelop=True 29 | basepython=python3.11 30 | changedir=. 
31 | deps = 32 | {[testenv]deps} 33 | black 34 | commands= 35 | black --check virt_backup tests 36 | -------------------------------------------------------------------------------- /virt-backup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Libvirt Backup 5 | """ 6 | 7 | from virt_backup import __main__ 8 | 9 | if __name__ == "__main__": 10 | __main__.parse_args_and_run(__main__.build_parser()) 11 | -------------------------------------------------------------------------------- /virt_backup/__init__.py: -------------------------------------------------------------------------------- 1 | APP_NAME = "virt-backup" 2 | VERSION = "0.5.6" 3 | -------------------------------------------------------------------------------- /virt_backup/backups/__init__.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import logging 3 | import os 4 | import threading 5 | from virt_backup.domains import get_domain_disks_of 6 | 7 | 8 | __all__ = [ 9 | "DomBackup", 10 | "DomCompleteBackup", 11 | "DomExtSnapshotCallbackRegistrer", 12 | "build_dom_complete_backup_from_def", 13 | "build_dom_backup_from_pending_info", 14 | ] 15 | 16 | 17 | logger = logging.getLogger("virt_backup") 18 | 19 | 20 | class _BaseDomBackup(ABC): 21 | backup_dir = "" 22 | dom = None 23 | packager = "" 24 | packager_opts = None 25 | 26 | def __init__(self, *args, **kwargs): 27 | self._cancel_flag = threading.Event() 28 | 29 | def cancel(self): 30 | self._cancel_flag.set() 31 | 32 | def _parse_dom_xml(self): 33 | """ 34 | Parse the domain's definition 35 | """ 36 | raise NotImplementedError 37 | 38 | def _main_backup_name_format(self, snapdate, *args, **kwargs): 39 | """ 40 | Main backup name format 41 | 42 | Extracted in its own function so it can be easily override 43 | 44 | :param snapdate: date when external snapshots have been created 45 | """ 46 | str_snapdate 
= snapdate.strftime("%Y%m%d-%H%M%S") 47 | return "{}_{}_{}".format(str_snapdate, self.dom.ID(), self.dom.name()) 48 | 49 | def _get_read_packager(self, name): 50 | kwargs = self._get_packager_kwargs(name) 51 | return getattr(ReadBackupPackagers, self.packager).value(**kwargs) 52 | 53 | def _get_write_packager(self, name): 54 | kwargs = self._get_packager_kwargs(name) 55 | return getattr(WriteBackupPackagers, self.packager).value(**kwargs) 56 | 57 | def _get_packager_kwargs(self, name): 58 | """ 59 | Get packager returns an adapted packager and update the pending info and 60 | definition. 61 | """ 62 | kwargs = {"name": name, "path": self.backup_dir, **self.packager_opts} 63 | specific_kwargs = {} 64 | if self.packager == "tar": 65 | specific_kwargs["archive_name"] = name 66 | elif self.packager == "zstd": 67 | specific_kwargs["name_prefix"] = name 68 | kwargs.update(specific_kwargs) 69 | 70 | return kwargs 71 | 72 | def _get_self_domain_disks(self, *filter_dev): 73 | dom_xml = self._parse_dom_xml() 74 | return get_domain_disks_of(dom_xml, *filter_dev) 75 | 76 | def _delete_with_error_printing(self, file_to_remove): 77 | try: 78 | os.remove(self.get_complete_path_of(file_to_remove)) 79 | except Exception as e: 80 | logger.error("Error removing {}: {}".format(file_to_remove, e)) 81 | 82 | def _clean_packager(self, packager, disks): 83 | """ 84 | If the package is shareable, will remove each disk backup then will 85 | only remove the packager if empty. 86 | """ 87 | if packager.is_shareable: 88 | with packager: 89 | for d in disks: 90 | packager.remove(d) 91 | if packager.list(): 92 | # Other non related backups still exists, do not delete 93 | # the package. 
94 | return 95 | 96 | packager.remove_package(self._cancel_flag) 97 | 98 | def get_complete_path_of(self, filename): 99 | return os.path.join(self.backup_dir, filename) 100 | 101 | 102 | from .complete import DomCompleteBackup, build_dom_complete_backup_from_def 103 | from .packagers import ReadBackupPackagers, WriteBackupPackagers 104 | from .pending import DomBackup, build_dom_backup_from_pending_info 105 | from .snapshot import DomExtSnapshotCallbackRegistrer 106 | -------------------------------------------------------------------------------- /virt_backup/backups/complete.py: -------------------------------------------------------------------------------- 1 | import arrow 2 | import logging 3 | import lxml.etree 4 | import os 5 | import shutil 6 | import tarfile 7 | 8 | from virt_backup.backups.packagers import ReadBackupPackagers, WriteBackupPackagers 9 | from virt_backup.compat_layers.definition import convert as compat_convert_definition 10 | from virt_backup.domains import get_domain_disks_of 11 | from virt_backup.exceptions import DomainRunningError 12 | from virt_backup.tools import copy_file 13 | from . 

logger = logging.getLogger("virt_backup")


def build_dom_complete_backup_from_def(
    definition, backup_dir, definition_filename=None
):
    """
    Build a DomCompleteBackup from a backup definition.

    :param definition: dict loaded from a definition file. It is first
        migrated in place to the current definition format.
    :param backup_dir: directory in which the backup is stored
    :param definition_filename: optional definition filename, relative to
        backup_dir
    """
    compat_convert_definition(definition)
    # Pass definition_filename straight to the constructor instead of
    # setting the attribute after construction.
    return DomCompleteBackup(
        name=definition["name"],
        dom_name=definition["domain_name"],
        backup_dir=backup_dir,
        date=arrow.get(definition["date"]),
        dom_xml=definition.get("domain_xml", None),
        disks=definition.get("disks", None),
        packager=definition["packager"]["type"],
        packager_opts=definition["packager"].get("opts", {}),
        definition_filename=definition_filename,
    )


class DomCompleteBackup(_BaseDomBackup):
    """
    A fully completed backup of a libvirt domain.

    Gives access to the packaged disk images for restoration, and handles
    the deletion of the backup files.
    """

    def __init__(
        self,
        name,
        dom_name,
        backup_dir,
        date=None,
        dom_xml=None,
        disks=None,
        packager="tar",
        packager_opts=None,
        definition_filename=None,
    ):
        super().__init__()

        #: domain name
        self.dom_name = dom_name

        #: backup directory path
        self.backup_dir = backup_dir

        #: definition filename
        self.definition_filename = definition_filename

        #: name is the backup name. It is used by the packagers and internal
        #: process.
        self.name = name

        #: backup date
        self.date = date

        #: domain XML as it was during the backup
        self.dom_xml = dom_xml

        #: packager name (falls back to "directory" when empty/None)
        self.packager = packager if packager else "directory"

        #: packager options arguments used during compression
        self.packager_opts = packager_opts or {}

        #: expected format: {disk_name1: filename1, disk_name2: filename2, …}
        self.disks = disks

    def restore_replace_domain(self, conn, id=None):
        """
        Define the backed-up domain on an hypervisor.

        :param conn: libvirt connection to the hypervisor
        :param id: new id for the restored domain
        """
        dom_xml = self._get_dom_xml_with_other_id(id) if id else self.dom_xml
        return conn.defineXML(dom_xml)

    def _get_dom_xml_with_other_id(self, id):
        """Return the stored domain XML with its `id` attribute replaced."""
        parsed_dxml = self._parse_dom_xml()
        parsed_dxml.set("id", str(id))

        return lxml.etree.tostring(parsed_dxml, pretty_print=True).decode()

    def _parse_dom_xml(self):
        # resolve_entities=False avoids XXE through a stored domain XML.
        return lxml.etree.fromstring(
            self.dom_xml, lxml.etree.XMLParser(resolve_entities=False)
        )

    def restore_and_replace_disk_of(self, disk, domain, disk_to_replace):
        """
        Restore a disk by replacing an old disks

        :param disk: disk name
        :param domain: domain to target
        :param disk_to_replace: which disk of `domain` to replace
        """
        self._ensure_domain_not_running(domain)
        disk_target_path = get_domain_disks_of(domain.XMLDesc(), disk_to_replace)[disk][
            "src"
        ]

        # TODO: restore disk with a correct extension, and not by keeping the
        # old disk one
        result = self.restore_disk_to(disk, disk_target_path)
        self._copy_disk_driver_with_domain(disk, domain, disk_to_replace)
        return result

    def _ensure_domain_not_running(self, domain):
        """Raise DomainRunningError if `domain` is active."""
        if domain.isActive():
            raise DomainRunningError(domain)

    def _copy_disk_driver_with_domain(self, disk, domain, domain_disk):
        """
        Replace the <driver> of `domain_disk` in `domain` by the driver stored
        in the backed-up domain XML for `disk`.
        """
        disk_xml = self._get_elemxml_of_domain_disk(self._parse_dom_xml(), disk)
        domain_xml = lxml.etree.fromstring(
            domain.XMLDesc(), lxml.etree.XMLParser(resolve_entities=False)
        )
        domain_disk_xml = self._get_elemxml_of_domain_disk(domain_xml, domain_disk)

        domain_disk_xml.replace(
            domain_disk_xml.xpath("driver")[0], disk_xml.xpath("driver")[0]
        )

    def _get_elemxml_of_domain_disk(self, dom_xml, disk):
        """
        Return the <disk> element whose target dev equals `disk`.

        Returns None (implicitly) when no matching device is found.
        """
        for elem in dom_xml.xpath("devices/disk"):
            try:
                if elem.get("device", None) == "disk":
                    dev = elem.xpath("target")[0].get("dev")
                    if dev == disk:
                        return elem
            except IndexError:
                # Disk element without a <target>: skip it.
                continue

    def restore_to(self, target):
        """
        Restore the whole backup (disk images and domain XML) into `target`,
        created if needed.
        """
        if not os.path.exists(target):
            os.makedirs(target)

        # TODO: store the original images names in the definition file
        disks_src = get_domain_disks_of(self.dom_xml)
        for d in self.disks:
            original_img_name = os.path.basename(disks_src[d]["src"])
            self.restore_disk_to(d, os.path.join(target, original_img_name))
        xml_path = "{}.xml".format(os.path.join(target, self.dom_name))
        with open(xml_path, "w") as xml_file:
            xml_file.write(self.dom_xml or "")

    def restore_disk_to(self, disk, target):
        """
        :param disk: disk name
        :param target: destination path for the restoration
        """
        packager = self._get_packager()
        with packager:
            return packager.restore(self.disks[disk], target, self._cancel_flag)

    def _get_packager(self):
        """Read packager for this backup."""
        return self._get_read_packager(self.name)

    def _get_write_packager(self):
        """Write packager for this backup."""
        return super()._get_write_packager(self.name)

    def delete(self):
        """Remove the backup files, then its definition file if any."""
        if not self.backup_dir:
            raise Exception("Backup dir not defined, cannot clean backup")

        packager = self._get_write_packager()
        self._clean_packager(packager, self.disks.values())
        if self.definition_filename:
            os.remove(self.get_complete_path_of(self.definition_filename))

# ---------------------------------------------------------------------------
# virt_backup/backups/packagers/__init__.py
# ---------------------------------------------------------------------------
from abc import ABC, abstractmethod
from enum import Enum
import logging

from virt_backup.exceptions import (
    BackupPackagerNotOpenedError,
    BackupPackagerOpenedError,
)


logger = logging.getLogger("virt_backup")


def _opened_only(f):
    """Decorator: assert that the packager is opened before calling `f`."""

    def wrapper(self, *args, **kwargs):
        self.assert_opened()
        return f(self, *args, **kwargs)

    return wrapper


def _closed_only(f):
    """Decorator: assert that the packager is closed before calling `f`."""

    def wrapper(self, *args, **kwargs):
        self.assert_closed()
        return f(self, *args, **kwargs)

    return wrapper


class _AbstractBackupPackager(ABC):
    closed = True
    #: is_shareable indicates if the same packager can be shared with multiple
    #: backups.
34 | is_shareable = False 35 | 36 | def __init__(self, name=None, *args, **kwargs): 37 | #: Used for logging 38 | self.name = name 39 | 40 | def __enter__(self): 41 | return self.open() 42 | 43 | def __exit__(self, *exc): 44 | self.close() 45 | 46 | @property 47 | def complete_path(self): 48 | pass 49 | 50 | @abstractmethod 51 | def open(self): 52 | return self 53 | 54 | @abstractmethod 55 | def close(self): 56 | pass 57 | 58 | @abstractmethod 59 | def list(self): 60 | pass 61 | 62 | def assert_opened(self): 63 | if self.closed: 64 | raise BackupPackagerNotOpenedError(self) 65 | 66 | def assert_closed(self): 67 | if not self.closed: 68 | raise BackupPackagerOpenedError(self) 69 | 70 | def log(self, level, message, *args, **kwargs): 71 | if self.name: 72 | message = "{}: {}".format(self.name, message) 73 | logger.log(level, message, *args, **kwargs) 74 | 75 | 76 | class _AbstractReadBackupPackager(_AbstractBackupPackager, ABC): 77 | @abstractmethod 78 | def restore(self, name, target, stop_event=None): 79 | pass 80 | 81 | 82 | class _AbstractWriteBackupPackager: 83 | @abstractmethod 84 | def add(self, src, name=None, stop_event=None): 85 | pass 86 | 87 | @abstractmethod 88 | def remove_package(self, stop_event=None): 89 | pass 90 | 91 | 92 | class _AbstractShareableWriteBackupPackager(_AbstractBackupPackager, ABC): 93 | is_shareable = True 94 | 95 | @abstractmethod 96 | def remove(self, name): 97 | pass 98 | 99 | 100 | from .directory import ReadBackupPackagerDir, WriteBackupPackagerDir 101 | from .tar import ReadBackupPackagerTar, WriteBackupPackagerTar 102 | 103 | try: 104 | from .zstd import ReadBackupPackagerZSTD, WriteBackupPackagerZSTD 105 | except ImportError as e: 106 | from .unsupported import ( 107 | UnsupportedReadBackupPackagerZSTD, 108 | UnsupportedWriteBackupPackagerZSTD, 109 | ) 110 | 111 | ReadBackupPackagerZSTD, WriteBackupPackagerZSTD = ( 112 | UnsupportedReadBackupPackagerZSTD, 113 | UnsupportedWriteBackupPackagerZSTD, 114 | ) 115 | error = str(e) 
116 | ReadBackupPackagerZSTD.reason, WriteBackupPackagerZSTD.reason = (error, error) 117 | 118 | 119 | class ReadBackupPackagers(Enum): 120 | directory = ReadBackupPackagerDir 121 | tar = ReadBackupPackagerTar 122 | zstd = ReadBackupPackagerZSTD 123 | 124 | 125 | class WriteBackupPackagers(Enum): 126 | directory = WriteBackupPackagerDir 127 | tar = WriteBackupPackagerTar 128 | zstd = WriteBackupPackagerZSTD 129 | -------------------------------------------------------------------------------- /virt_backup/backups/packagers/directory.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import shutil 4 | 5 | from virt_backup.exceptions import CancelledError, ImageNotFoundError 6 | from . import ( 7 | _AbstractBackupPackager, 8 | _AbstractReadBackupPackager, 9 | _AbstractShareableWriteBackupPackager, 10 | _opened_only, 11 | _closed_only, 12 | ) 13 | 14 | 15 | class _AbstractBackupPackagerDir(_AbstractBackupPackager): 16 | """ 17 | Images are just copied in a directory 18 | """ 19 | 20 | def __init__(self, name, path, *args, **kwargs): 21 | super().__init__(name) 22 | self.path = path 23 | 24 | @property 25 | def complete_path(self): 26 | return self.path 27 | 28 | def open(self): 29 | if not os.path.isdir(self.path): 30 | os.makedirs(self.path) 31 | 32 | self.closed = False 33 | return self 34 | 35 | @_opened_only 36 | def close(self): 37 | self.closed = True 38 | 39 | @_opened_only 40 | def list(self): 41 | return os.listdir(self.path) 42 | 43 | def _copy_file(self, src, dst, stop_event=None, buffersize=2**20): 44 | if not os.path.exists(dst) and dst.endswith("/"): 45 | os.makedirs(dst) 46 | if os.path.isdir(dst): 47 | dst = os.path.join(dst, os.path.basename(src)) 48 | 49 | if stop_event and stop_event.is_set(): 50 | raise CancelledError() 51 | with open(src, "rb") as fsrc, open(dst, "xb") as fdst: 52 | while True: 53 | if stop_event and stop_event.is_set(): 54 | raise CancelledError() 55 | data = 
fsrc.read(buffersize) 56 | if not data: 57 | break 58 | 59 | if stop_event and stop_event.is_set(): 60 | raise CancelledError() 61 | fdst.write(data) 62 | return dst 63 | 64 | 65 | class ReadBackupPackagerDir(_AbstractReadBackupPackager, _AbstractBackupPackagerDir): 66 | @_opened_only 67 | def restore(self, name, target, stop_event=None): 68 | src = os.path.join(self.path, name) 69 | if not os.path.exists(src): 70 | raise ImageNotFoundError(name, self.path) 71 | 72 | self.log(logging.DEBUG, "Restore %s in %s", src, target) 73 | return self._copy_file(src, target, stop_event=stop_event) 74 | 75 | 76 | class WriteBackupPackagerDir( 77 | _AbstractShareableWriteBackupPackager, _AbstractBackupPackagerDir 78 | ): 79 | @_opened_only 80 | def add(self, src, name=None, stop_event=None): 81 | if not name: 82 | name = os.path.basename(src) 83 | target = os.path.join(self.path, name) 84 | self.log(logging.DEBUG, "Copy %s as %s", src, target) 85 | self._copy_file(src, target, stop_event=stop_event) 86 | 87 | return target 88 | 89 | @_opened_only 90 | def remove(self, name): 91 | target = os.path.join(self.path, name) 92 | self.log(logging.DEBUG, "Remove file %s", target) 93 | os.remove(target) 94 | 95 | @_closed_only 96 | def remove_package(self, stop_event=None): 97 | if not os.path.exists(self.complete_path): 98 | raise FileNotFoundError(self.complete_path) 99 | 100 | return shutil.rmtree(self.complete_path) 101 | -------------------------------------------------------------------------------- /virt_backup/backups/packagers/tar.py: -------------------------------------------------------------------------------- 1 | import io 2 | import logging 3 | import os 4 | import re 5 | import shutil 6 | import tarfile 7 | 8 | from virt_backup.exceptions import CancelledError, ImageNotFoundError, ImageFoundError 9 | from . 
from . import (
    _AbstractBackupPackager,
    _AbstractReadBackupPackager,
    _AbstractWriteBackupPackager,
    _opened_only,
    _closed_only,
)


class _AbstractBackupPackagerTar(_AbstractBackupPackager):
    """Packager storing all images of a backup inside one tar archive."""

    _tarfile = None
    _mode = ""

    def __init__(
        self,
        name,
        path,
        archive_name,
        compression=None,
        compression_lvl=None,
        *args,
        **kwargs
    ):
        super().__init__(name)

        #: directory path to store the tarfile in
        self.path = path

        #: tarfile archive name (an extension is appended automatically when
        #: missing)
        self.archive_name = archive_name

        self.compression = compression
        self.compression_lvl = compression_lvl

    @property
    def complete_path(self):
        """Full archive path, extension included."""
        if self.compression in (None, "tar"):
            extension = "tar"
        else:
            extension = "tar.{}".format(self.compression)

        if re.match(r".*\.tar\.?.*$", self.archive_name):
            return os.path.join(self.path, self.archive_name)
        return os.path.join(self.path, "{}.{}".format(self.archive_name, extension))

    def open(self):
        self._tarfile = self._open_tar(self._mode)
        self.closed = False
        return self

    def _open_tar(self, mode_prefix):
        """Open the tarfile with the compression suffix and level configured."""
        extra_args = {}
        if self.compression in (None, "tar"):
            mode_suffix = ""
        else:
            mode_suffix = "{}".format(self.compression)
            if self.compression_lvl:
                # lzma uses "preset", the other compressors "compresslevel".
                if self.compression == "xz":
                    extra_args["preset"] = self.compression_lvl
                else:
                    extra_args["compresslevel"] = self.compression_lvl

        if not os.path.isdir(self.path):
            os.makedirs(self.path)

        mode = mode_prefix
        if mode_suffix:
            mode = "{}:{}".format(mode_prefix, mode_suffix)
        return tarfile.open(self.complete_path, mode, **extra_args)

    @_opened_only
    def close(self):
        self._tarfile.close()
        self.closed = True

    @_opened_only
    def list(self):
        return self._tarfile.getnames()


class ReadBackupPackagerTar(_AbstractReadBackupPackager, _AbstractBackupPackagerTar):
    """Read-only tar packager."""

    _mode = "r"

    def __init__(
        self, name, path, archive_name, compression=None, compression_lvl=None
    ):
        # Do not set compression_lvl on readonly, as it can trigger some errors
        # (with XZ for example)
        super().__init__(name, path, archive_name, compression)

    @_opened_only
    def restore(self, name, target, stop_event=None):
        try:
            member = self._tarfile.getmember(name)
        except KeyError:
            raise ImageNotFoundError(name, self.complete_path)

        if not os.path.exists(target) and target.endswith("/"):
            os.makedirs(target)
        if os.path.isdir(target):
            target = os.path.join(target, name)
        if os.path.isfile(target):
            raise ImageFoundError(target)

        chunk_size = 2**20
        self._tarfile.fileobj.flush()
        try:
            with self._tarfile.extractfile(member) as fsrc:
                with open(target, "xb") as fdst:
                    while True:
                        if stop_event and stop_event.is_set():
                            raise CancelledError()
                        chunk = fsrc.read(chunk_size)
                        if not chunk:
                            break

                        if stop_event and stop_event.is_set():
                            raise CancelledError()
                        fdst.write(chunk)
        except:
            # Leave no partially restored image behind, then re-raise.
            if os.path.exists(target):
                os.remove(target)
            raise

        return target


class WriteBackupPackagerTar(_AbstractWriteBackupPackager, _AbstractBackupPackagerTar):
    """Writable tar packager."""

    _mode = "x"

    @_opened_only
    def add(self, src, name=None, stop_event=None):
        """
        WARNING: interrupting this function is unsafe, and will probably break the
        tar archive.

        Do not use tarfile.add() as it is a blocking operation. Workaround the issue
        by copying a part of what tarfile.add() does, but checks after each buffer on
        the stop_event.
        """
        self.log(logging.DEBUG, "Add %s into %s", src, self.complete_path)
        tarinfo = self._tarfile.gettarinfo(src, arcname=name or os.path.basename(src))

        if stop_event and stop_event.is_set():
            raise CancelledError()

        # Write the tar header by hand, then the file content chunk by chunk.
        header = tarinfo.tobuf(
            self._tarfile.format, self._tarfile.encoding, self._tarfile.errors
        )
        self._tarfile.fileobj.write(header)
        self._tarfile.offset += len(header)

        chunk_size = 2**20
        with open(src, "rb") as fsrc:
            while True:
                if stop_event and stop_event.is_set():
                    raise CancelledError()
                chunk = fsrc.read(chunk_size)
                if not chunk:
                    break

                if stop_event and stop_event.is_set():
                    raise CancelledError()
                self._tarfile.fileobj.write(chunk)

        # Pad the last block and keep the tarfile bookkeeping consistent.
        blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE)
        if remainder > 0:
            self._tarfile.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
            blocks += 1
        self._tarfile.offset += blocks * tarfile.BLOCKSIZE
        self._tarfile.members.append(tarinfo)

        return self.complete_path

    @_closed_only
    def remove_package(self, stop_event=None):
        if not os.path.exists(self.complete_path):
            raise FileNotFoundError(self.complete_path)

        return os.remove(self.complete_path)

# ---------------------------------------------------------------------------
# virt_backup/backups/packagers/unsupported.py
# ---------------------------------------------------------------------------
from virt_backup.exceptions import UnsupportedPackagerError
from . import (
    _AbstractBackupPackager,
    _AbstractReadBackupPackager,
    _AbstractWriteBackupPackager,
)


class UnsupportedBackupPackager(_AbstractBackupPackager):
    """
    Placeholder for a packager whose dependencies are not installed.

    Any attempt to instantiate it raises UnsupportedPackagerError, with
    `reason` explaining why the real packager is unavailable.
    """

    packager = ""
    reason = None

    def __init__(self, *args, **kwargs):
        raise UnsupportedPackagerError(self.packager, self.reason)

    def open(self):
        pass

    def close(self):
        pass

    def list(self):
        pass


class UnsupportedReadBackupPackager(
    _AbstractReadBackupPackager, UnsupportedBackupPackager
):
    # Fix: stub signature now matches the abstract interface
    # (stop_event was missing).
    def restore(self, name, target, stop_event=None):
        pass


class UnsupportedWriteBackupPackager(
    _AbstractWriteBackupPackager, UnsupportedBackupPackager
):
    # Fix: stub signatures now match the abstract interface (stop_event was
    # missing on add(), and remove_package() took a stray `name` argument
    # instead of stop_event).
    def add(self, src, name=None, stop_event=None):
        pass

    def remove_package(self, stop_event=None):
        pass


class UnsupportedReadBackupPackagerZSTD(UnsupportedReadBackupPackager):
    packager = "zstd"


class UnsupportedWriteBackupPackagerZSTD(UnsupportedWriteBackupPackager):
    packager = "zstd"

# ---------------------------------------------------------------------------
# virt_backup/backups/packagers/zstd.py
# ---------------------------------------------------------------------------
import glob
import logging
import os
import re
import shutil
import zstandard as zstd

from virt_backup.exceptions import CancelledError, ImageNotFoundError, ImageFoundError
from . import (
    _AbstractBackupPackager,
    _AbstractReadBackupPackager,
    _AbstractWriteBackupPackager,
    _opened_only,
    _closed_only,
)


class _AbstractBackupPackagerZSTD(_AbstractBackupPackager):
    """Packager storing each image as a separate zstd archive."""

    _mode = ""

    def __init__(
        self, name, path, name_prefix, compression_lvl=0, threads=0, *args, **kwargs
    ):
        super().__init__(name)

        #: directory path to store the archives in
27 | self.path = path 28 | 29 | #: Each file from this package will be stored as one separated archive. 30 | #: Their name will be prefixed by prefix_name 31 | self.name_prefix = name_prefix 32 | 33 | #: zstd_params is used by the compressor. 34 | self.zstd_params = zstd.ZstdCompressionParameters.from_level( 35 | compression_lvl, threads=threads 36 | ) 37 | 38 | @property 39 | def complete_path(self): 40 | return self.path 41 | 42 | def archive_path(self, name): 43 | """ 44 | WARNING: it does not check that the archive actually exists, 45 | just returns the path it should have 46 | """ 47 | return os.path.join(self.path, self._gen_archive_name(name)) 48 | 49 | def _gen_archive_name(self, filename): 50 | return "{}_{}.zstd".format(self.name_prefix, filename) 51 | 52 | def open(self): 53 | if not os.path.isdir(self.path): 54 | os.makedirs(self.path) 55 | 56 | self.closed = False 57 | return self 58 | 59 | @_opened_only 60 | def close(self): 61 | self.closed = True 62 | 63 | @_opened_only 64 | def list(self): 65 | results = [] 66 | pattern = re.compile(r"{}_(.*)\.zstd$".format(self.name_prefix)) 67 | for i in glob.glob(os.path.join(self.complete_path, "*.zstd")): 68 | m = pattern.match(os.path.basename(i)) 69 | if m: 70 | results.append(m.group(1)) 71 | 72 | return results 73 | 74 | 75 | class ReadBackupPackagerZSTD(_AbstractReadBackupPackager, _AbstractBackupPackagerZSTD): 76 | _mode = "r" 77 | 78 | @_opened_only 79 | def restore(self, name, target, stop_event=None): 80 | if name not in self.list(): 81 | raise ImageNotFoundError(self.archive_path(name), self.complete_path) 82 | 83 | if not os.path.exists(target) and target.endswith("/"): 84 | os.makedirs(target) 85 | if os.path.isdir(target): 86 | target = os.path.join(target, name) 87 | if os.path.isfile(target): 88 | raise ImageFoundError(target) 89 | 90 | buffersize = 2**20 91 | dctx = zstd.ZstdDecompressor() 92 | try: 93 | with open(self.archive_path(name), "rb") as ifh, open(target, "xb") as ofh: 94 | with 
dctx.stream_reader(ifh) as reader: 95 | while True: 96 | if stop_event and stop_event.is_set(): 97 | raise CancelledError() 98 | 99 | data = reader.read(buffersize) 100 | if not data: 101 | break 102 | 103 | if stop_event and stop_event.is_set(): 104 | raise CancelledError() 105 | ofh.write(data) 106 | except: 107 | if os.path.exists(target): 108 | os.remove(target) 109 | raise 110 | 111 | return target 112 | 113 | 114 | class WriteBackupPackagerZSTD( 115 | _AbstractWriteBackupPackager, _AbstractBackupPackagerZSTD 116 | ): 117 | _mode = "x" 118 | 119 | @_opened_only 120 | def add(self, src, name=None, stop_event=None): 121 | name = name or os.path.basename(src) 122 | self.log(logging.DEBUG, "Add %s into %s", src, self.archive_path(name)) 123 | 124 | cctx = zstd.ZstdCompressor(compression_params=self.zstd_params) 125 | try: 126 | with open(src, "rb") as ifh, open(self.archive_path(name), "wb") as ofh: 127 | with cctx.stream_writer(ofh) as writer: 128 | while True: 129 | if stop_event and stop_event.is_set(): 130 | raise CancelledError() 131 | 132 | data = ifh.read(zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE) 133 | if not data: 134 | break 135 | 136 | if stop_event and stop_event.is_set(): 137 | raise CancelledError() 138 | writer.write(data) 139 | except: 140 | if os.path.exists(self.archive_path(name)): 141 | os.remove(self.archive_path(name)) 142 | raise 143 | 144 | return self.archive_path(name) 145 | 146 | @_opened_only 147 | def remove(self, name): 148 | if name not in self.list(): 149 | raise ImageNotFoundError(self.archive_path(name), self.complete_path) 150 | 151 | os.remove(self.archive_path(name)) 152 | 153 | @_closed_only 154 | def remove_package(self, stop_event=None): 155 | if not os.path.exists(self.complete_path): 156 | raise FileNotFoundError(self.complete_path) 157 | 158 | with self: 159 | files = self.list() 160 | 161 | for i in files: 162 | if stop_event and stop_event.is_set(): 163 | raise CancelledError() 164 | os.remove(self.archive_path(i)) 165 | 
# ---------------------------------------------------------------------------
# virt_backup/backups/snapshot.py
# ---------------------------------------------------------------------------
from collections import defaultdict
import logging
import os
import subprocess
import threading
import arrow
import libvirt
import lxml.etree

from virt_backup.domains import (
    get_domain_disks_of,
    get_domain_incompatible_disks_of,
    get_xml_block_of_disk,
)
from virt_backup.exceptions import DiskNotSnapshot, SnapshotNotStarted


logger = logging.getLogger("virt_backup")


class DomExtSnapshotCallbackRegistrer:
    """
    Registers block-job event callbacks, one per snapshot path, on a libvirt
    connection.
    """

    _callback_id = None

    def __init__(self, conn):
        #: registered callbacks, `{snapshot_path: callback}`
        self.callbacks = {}

        #: libvirt connection to use
        self.conn = conn

    def __enter__(self):
        return self.open()

    def __exit__(self, *exc):
        self.close()

    def open(self):
        """Register the global block-job event handler on the connection."""
        self._callback_id = self.conn.domainEventRegisterAny(
            None, libvirt.VIR_DOMAIN_EVENT_ID_BLOCK_JOB, self.event_callback, None
        )

    def close(self):
        self.conn.domainEventDeregisterAny(self._callback_id)

    def event_callback(self, conn, dom, snap, event_id, status, *args):
        """Dispatch a block-job event to the callback registered for `snap`."""
        if status != libvirt.VIR_DOMAIN_BLOCK_JOB_READY:
            if status == libvirt.VIR_DOMAIN_BLOCK_JOB_FAILED:
                logger.error("Block job failed for snapshot %s", snap)

            return None

        if snap not in self.callbacks:
            logger.error("Callback for snapshot %s called but not existing", snap)
            return None

        return self.callbacks[snap](conn, dom, snap, event_id, status, *args)


class DomExtSnapshot:
    """
    Libvirt domain backup
    """

    metadatas = None

    def __init__(
        self, dom, disks, callbacks_registrer, conn=None, timeout=None, quiesce=False
    ):
        #: domain to snapshot. Has to be a libvirt.virDomain object
        self.dom = dom

        #: disks to snapshot, `{disk_name: {"src": path, …}}`
        self.disks = disks

        self._callbacks_registrer = callbacks_registrer

        #: timeout when waiting for the block pivot to end. Infinite wait if
        #  timeout is None
        self.timeout = timeout

        #: libvirt connection to use. If not sent, will use the connection used
        #  for self.domain
        self.conn = self.dom._conn if conn is None else conn

        #: enable or not quiesce. If it is not supported, the snapshot will
        #  fallback to quiesce deactivated.
        self.quiesce = quiesce

        #: used to trigger when block pivot ends, by snapshot path
        self._wait_for_pivot = defaultdict(threading.Event)

    def start(self):
        """
        Start the external snapshot
        """
        snapshot = self.external_snapshot()

        # all of our disks are frozen, so the backup date is right now
        snapshot_date = arrow.now()

        self.metadatas = {
            "date": snapshot_date,
            "disks": {
                disk: {
                    "src": prop["src"],
                    "type": self._get_disk_type(disk),
                    "snapshot": self._get_snapshot_path(prop["src"], snapshot),
                }
                for disk, prop in self.disks.items()
            },
        }

        return self.metadatas

    def external_snapshot(self):
        """
        Create an external snapshot in order to freeze the base image
        """
        snap_xml = self.gen_libvirt_snapshot_xml()
        flags = self._get_snapshot_flags(quiesce=self.quiesce)
        try:
            return self.dom.snapshotCreateXML(snap_xml, flags)
        except libvirt.libvirtError as e:
            if self.quiesce:
                # Quiesce snapshots can fail if no agent is present. Retry without it.
                logger.debug(
                    "%s: snapshot with Quiesce failed: %s",
                    self.dom.name(),
                    e.get_error_message(),
                )
                # Fix: the message was previously wrapped in a 1-tuple
                # (trailing comma inside the parentheses), which broke the
                # lazy %-formatting done by logging.
                logger.warning(
                    "%s: snapshot with Quiesce required, but failed. It can be due "
                    "to the lack of a QEMU guest agent running inside the VM. "
                    "Retrying without Quiesce.",
                    self.dom.name(),
                )
                flags = self._get_snapshot_flags(quiesce=False)
                return self.dom.snapshotCreateXML(snap_xml, flags)
            raise

    def _get_snapshot_flags(self, quiesce=False):
        """Compute snapshot creation flags (bitwise OR, as they are bitmasks)."""
        flags = (
            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
            | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC
            | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
        )
        if quiesce:
            flags |= libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE

        return flags

    def gen_libvirt_snapshot_xml(self):
        """
        Generate a xml defining the snapshot
        """
        root_el = lxml.etree.Element("domainsnapshot")
        xml_tree = root_el.getroottree()

        descr_el = lxml.etree.Element("description")
        root_el.append(descr_el)
        descr_el.text = "Pre-backup external snapshot"

        disks_el = lxml.etree.Element("disks")
        root_el.append(disks_el)

        all_domain_disks = get_domain_disks_of(
            lxml.etree.fromstring(
                self.dom.XMLDesc(), lxml.etree.XMLParser(resolve_entities=False)
            )
        )
        for d in sorted(all_domain_disks.keys()):
            disk_el = lxml.etree.Element("disk")
            disk_el.attrib["name"] = d
            # Skipped disks need to have an entry, with a snapshot value
            # explicitly set to "no", otherwise libvirt would create a
            # snapshot for them.
            disk_el.attrib["snapshot"] = "external" if d in self.disks else "no"
            disks_el.append(disk_el)

        non_snapshotable_disks = get_domain_incompatible_disks_of(
            lxml.etree.fromstring(
                self.dom.XMLDesc(), lxml.etree.XMLParser(resolve_entities=False)
            )
        )
        for d in non_snapshotable_disks:
            disk_el = lxml.etree.Element("disk")
            disk_el.attrib["name"] = d
            disk_el.attrib["snapshot"] = "no"
            disks_el.append(disk_el)

        return lxml.etree.tostring(xml_tree, pretty_print=True).decode()

    def _get_snapshot_path(self, parent_disk_path, snapshot):
        """Path of the snapshot image derived from the parent disk path."""
        return "{}.{}".format(os.path.splitext(parent_disk_path)[0], snapshot.getName())

    def _get_disk_type(self, disk):
        """Return the driver type of `disk` (defaults to "raw")."""
        dom_xml = lxml.etree.fromstring(
            self.dom.XMLDesc(), lxml.etree.XMLParser(resolve_entities=False)
        )
        disk_xml = get_xml_block_of_disk(dom_xml, disk)
        return disk_xml.xpath("driver")[0].get("type", "raw")

    def clean(self):
        """
        Clean the snapshot of every disk, then always unregister the
        callbacks, even on failure.

        :raises SnapshotNotStarted: if start() was not called before
        """
        if not self.metadatas:
            raise SnapshotNotStarted()

        disks = tuple(self.metadatas["disks"].keys())
        snapshot_paths = tuple(
            os.path.abspath(self.metadatas["disks"][disk]["snapshot"]) for disk in disks
        )
        try:
            for disk in disks:
                try:
                    self.clean_for_disk(disk)
                except Exception as e:
                    logger.critical(
                        (
                            "Failed to clean temp files of disk {} " "for domain {}: {}"
                        ).format(disk, self.dom.name(), e)
                    )
                    raise
        finally:
            for snapshot in snapshot_paths:
                self._callbacks_registrer.callbacks.pop(snapshot, None)

    def clean_for_disk(self, disk):
        """
        Commit then pivot the external snapshot of one disk, and remove the
        temporary snapshot image.

        :raises SnapshotNotStarted: if start() was not called before
        :raises DiskNotSnapshot: if `disk` is not part of the snapshot
        """
        if not self.metadatas:
            raise SnapshotNotStarted()
        elif disk not in self.metadatas["disks"]:
            raise DiskNotSnapshot(disk)

        snapshot_path = os.path.abspath(self.metadatas["disks"][disk]["snapshot"])
        disk_path = os.path.abspath(self.metadatas["disks"][disk]["src"])
        disk_type = self.metadatas["disks"][disk]["type"]

        # Do not commit and pivot if our snapshot is not the current top disk
        # NOTE(review): other call sites parse XMLDesc() with lxml before
        # calling get_xml_block_of_disk; confirm it also accepts a raw XML
        # string as passed here.
        current_disk_path = (
            get_xml_block_of_disk(self.dom.XMLDesc(), disk)
            .xpath("source")[0]
            .get("file")
        )
        if os.path.abspath(current_disk_path) != snapshot_path:
            logger.warning(
                "It seems that the domain configuration (and specifically the "
                "one related to its disks) has been changed. The current disk "
                "will not be committed nor pivoted with the external "
                "snapshot, to not break the backing chain.\n\n"
                "You might want to manually check, where your domain image is "
                "stored, if no temporary file is remaining ({}).".format(
                    os.path.dirname(current_disk_path)
                )
            )
            return

        if self.dom.isActive():
            self.blockcommit_disk(disk)
        else:
            # An inactive domain cannot blockcommit through libvirt: use
            # qemu-img, then rewrite the disk source manually.
            self._qemu_img_commit(disk_path, snapshot_path)
            self._manually_pivot_disk(disk, disk_path, disk_type)
            os.remove(snapshot_path)

        self.metadatas["disks"].pop(disk)
        self._callbacks_registrer.callbacks.pop(snapshot_path, None)

    def blockcommit_disk(self, disk):
        """
        Block commit

        Will allow to merge the external snapshot previously created with the
        disk main image
        Wait for the pivot to be triggered in case of active blockcommit.

        :param disk: diskname to blockcommit
        """
        snapshot_path = os.path.abspath(self.metadatas["disks"][disk]["snapshot"])
        self._callbacks_registrer.callbacks[snapshot_path] = self._pivot_callback

        logger.debug("%s: blockcommit %s to pivot snapshot", self.dom.name(), disk)
        self.dom.blockCommit(
            disk,
            None,
            None,
            0,
            (
                libvirt.VIR_DOMAIN_BLOCK_COMMIT_ACTIVE
                | libvirt.VIR_DOMAIN_BLOCK_COMMIT_SHALLOW
            ),
        )

        self._wait_for_pivot[snapshot_path].wait(timeout=self.timeout)
        self._wait_for_pivot.pop(snapshot_path)

    def _pivot_callback(self, conn, dom, snap, event_id, status, *args):
        """
        Pivot the snapshot

        If the received domain matches with the one associated to this backup,
        abort the blockjob, pivot it and delete the snapshot.
        """
        domain_matches = dom.UUID() == self.dom.UUID()
        if status == libvirt.VIR_DOMAIN_BLOCK_JOB_READY and domain_matches:
            dom.blockJobAbort(snap, libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
            os.remove(snap)
            self._wait_for_pivot[os.path.abspath(snap)].set()

    def _qemu_img_commit(self, parent_disk_path, snapshot_path):
        """
        Use qemu-img to BlockCommit

        Libvirt does not allow to blockcommit a inactive domain, so have to use
        qemu-img instead.
        """
        return subprocess.check_call(
            ("qemu-img", "commit", "-b", parent_disk_path, snapshot_path)
        )

    def _manually_pivot_disk(self, disk, src, disk_type):
        """
        Replace the disk src

        :param disk: disk name
        :param src: new disk path
        """
        dom_xml = lxml.etree.fromstring(
            self.dom.XMLDesc(), lxml.etree.XMLParser(resolve_entities=False)
        )

        disk_xml = get_xml_block_of_disk(dom_xml, disk)
        disk_xml.xpath("source")[0].set("file", src)
        disk_xml.xpath("driver")[0].set("type", disk_type)

        if self.conn.getLibVersion() >= 3000000:
            # updating a disk is broken in libvirt < 3.0
            return self.dom.updateDeviceFlags(
                lxml.etree.tostring(disk_xml).decode(),
                libvirt.VIR_DOMAIN_AFFECT_CONFIG,
            )
        else:
            return self.conn.defineXML(lxml.etree.tostring(dom_xml).decode())

# ---------------------------------------------------------------------------
# virt_backup/compat_layers/__init__.py
# ---------------------------------------------------------------------------
__all__ = ["config", "definition"]

from . import config
from . import definition
import definition 5 | -------------------------------------------------------------------------------- /virt_backup/compat_layers/config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import logging 3 | import yaml 4 | 5 | 6 | logger = logging.getLogger("virt_backup") 7 | 8 | 9 | def convert_warn(config): 10 | converters = (ToV0_4,) 11 | for c in converters: 12 | c().convert_warn(config) 13 | 14 | 15 | class ConfigConverter(ABC): 16 | @abstractmethod 17 | def convert_warn(self, config): 18 | pass 19 | 20 | @abstractmethod 21 | def convert(self, config): 22 | pass 23 | 24 | 25 | class ToV0_4(ConfigConverter): 26 | """ 27 | Convert from v0.1 to v0.4 28 | """ 29 | 30 | def convert_warn(self, config): 31 | for m in self.convert(config): 32 | logger.warning("%s\n", m) 33 | 34 | def convert(self, config): 35 | warnings = [] 36 | for group, group_config in config["groups"].items(): 37 | convertion = self.convert_group(group_config) 38 | msg = convertion["msg"] 39 | changed = convertion["changed"] 40 | 41 | if msg: 42 | warnings.append( 43 | 'Action needed for group "{}": {}.\nAdapt its config for:\n\t{}'.format( 44 | group, msg, yaml.safe_dump(changed, default_flow_style=False) 45 | ) 46 | ) 47 | 48 | return warnings 49 | 50 | def convert_group(self, group): 51 | if not ("compression" in group or "compression_lvl" in group): 52 | return {"changed": {}, "msg": ""} 53 | 54 | changed = {} 55 | 56 | if "compression" in group: 57 | old_compression = group.pop("compression") 58 | new_packager = "" 59 | new_packager_opts = {} 60 | 61 | if old_compression is None: 62 | new_packager = "directory" 63 | else: 64 | new_packager = "tar" 65 | if old_compression != "tar": 66 | new_packager_opts["compression"] = old_compression 67 | 68 | for d in (group, changed): 69 | d["packager"] = new_packager 70 | if new_packager_opts: 71 | d["packager_opts"] = new_packager_opts 72 | 73 | if "compression_lvl" in group: 
def convert(definition):
    """
    Upgrade a backup definition in place to the newest definition format.

    Converters are applied in order; the definition's version is parsed
    again on every iteration because each converter bumps it after running,
    which allows chained upgrades.

    :param definition: backup definition dict, modified in place
    """
    for converter in (ToV0_4(),):
        current_version = version_parser(definition["version"])
        if not converter.is_needed(current_version):
            continue
        logger.debug(
            "definition %s needs convertion update to v%s",
            definition.get("name") or definition["domain_name"],
            converter.from_version_to[1],
        )
        converter.convert(definition)
"compression" in definition: 53 | old_compression = definition.pop("compression") 54 | new_packager = "" 55 | new_packager_opts = {} 56 | 57 | if old_compression is None: 58 | new_packager = "directory" 59 | else: 60 | new_packager = "tar" 61 | if old_compression != "tar": 62 | new_packager_opts["compression"] = old_compression 63 | 64 | definition["packager"] = { 65 | "type": new_packager, 66 | "opts": new_packager_opts or {}, 67 | } 68 | elif "packager" not in definition: 69 | definition["packager"] = {"type": "directory", "opts": {}} 70 | 71 | if "compression_lvl" in definition: 72 | compression_lvl = definition.pop("compression_lvl") 73 | definition["packager"]["opts"]["compression_lvl"] = compression_lvl 74 | 75 | def convert_name(self, definition): 76 | if "tar" in definition: 77 | archive_name_search = re.match(r"(.*)\.tar\.?.*$", definition["tar"]) 78 | if archive_name_search: 79 | definition["name"] = archive_name_search.group(1) 80 | else: 81 | definition["name"] = definition["tar"] 82 | definition.pop("tar") 83 | 84 | if "name" not in definition: 85 | snapdate = arrow.get(definition["date"]) 86 | str_snapdate = snapdate.strftime("%Y%m%d-%H%M%S") 87 | definition["name"] = "{}_{}_{}".format( 88 | str_snapdate, definition["domain_id"], definition["domain_name"] 89 | ) 90 | -------------------------------------------------------------------------------- /virt_backup/compat_layers/pending_info.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import lxml.etree 3 | 4 | from packaging.version import parse as version_parser 5 | 6 | from virt_backup.domains import get_xml_block_of_disk 7 | from . 
def convert(pending_info):
    """
    Upgrade *pending_info* in place through every needed compat layer.

    Converters are chained: each one bumps the stored version after
    running, so the version is parsed again before testing the next
    converter.

    :param pending_info: pending backup info dict, modified in place
    """
    for converter in (ToV0_4(), V0_4ToV0_5_2()):
        if not converter.is_needed(version_parser(pending_info["version"])):
            continue
        logger.debug(
            "pending_info %s needs convertion update to v%s",
            pending_info.get("name") or pending_info["domain_name"],
            converter.from_version_to[1],
        )
        converter.convert(pending_info)
def get_config(custom_path=None):
    """
    Get config file and load it with yaml

    Looks for the configuration file at ``custom_path`` when given,
    otherwise in each directory of ``CONFIG_DIRS`` (first existing file
    wins).

    :param custom_path: optional explicit path to the configuration file
    :returns: loaded config in yaml, as a dict object
    :raises FileNotFoundError: if no configuration file could be opened
    """
    config_path = ""
    if custom_path:
        config_path = custom_path
    else:
        for d in CONFIG_DIRS:
            config_path = os.path.join(d, CONFIG_FILENAME)
            if os.path.isfile(config_path):
                break
    try:
        with open(config_path, "r") as config_file:
            return yaml.safe_load(config_file)
    except FileNotFoundError as e:
        logger.debug(e)
        if custom_path:
            logger.error("Configuration file {} not found.".format(custom_path))
        else:
            logger.error(
                "No configuration file can be found. Please create a "
                "config.yml in one of these directories:\n"
                "{}".format(", ".join(CONFIG_DIRS))
            )
        # Re-raise the original exception instead of a new argument-less
        # FileNotFoundError, so the failing path and traceback context are
        # preserved for the caller.
        raise
def get_domain_disks_of(dom_xml, *filter_dev):
    """
    Get disks from the domain xml

    :param dom_xml: domain xml (str or parsed element) to extract the disks
                    from
    :param filter_dev: return only disks for which the dev name matches
                       with one in filter_dev. If no parameter, will return
                       every disks.
    :returns: {dev: {"src": source_path, "type": driver_type}, …}
    :raises DiskNotFoundError: if a disk of filter_dev is not in the domain
    """
    if isinstance(dom_xml, str):
        dom_xml = lxml.etree.fromstring(
            dom_xml, lxml.etree.XMLParser(resolve_entities=False)
        )
    filter_dev = sorted(list(filter_dev))
    disks = {}
    for elem in dom_xml.xpath("devices/disk"):
        try:
            if elem.get("device", None) != "disk":
                continue

            if elem.get("type", None) != "file":
                # only file-backed disks are snapshotable by virt-backup
                logger.debug(
                    "Disk %s is not a file, which not compatible with virt-backup",
                    elem.xpath("target")[0].get("dev"),
                )
                continue

            dev = elem.xpath("target")[0].get("dev")
            if filter_dev and dev not in filter_dev:
                continue
            src = elem.xpath("source")[0].get("file")
            disk_type = elem.xpath("driver")[0].get("type")

            disks[dev] = {"src": src, "type": disk_type}

            # Stop early once every requested disk has been captured.
            # (Bug fix: the previous test `filter_dev in list(sorted(...))`
            # checked whether the filter *list* was an element of the key
            # list, which was always False, so the early exit never fired.)
            if filter_dev and sorted(disks.keys()) == filter_dev:
                break
        except IndexError:
            # malformed disk node missing target/source/driver: skip it
            continue

    for disk in filter_dev:
        if disk not in disks:
            raise DiskNotFoundError(disk)

    return disks
def get_xml_block_of_disk(dom_xml, disk):
    """
    Return the <disk> XML element matching a given device name.

    :param dom_xml: domain xml, as a string or an already parsed element
    :param disk: dev name of the disk to look for (e.g. "vda")
    :returns: the matching disk element
    :raises DiskNotFoundError: when no disk device carries that dev name
    """
    if isinstance(dom_xml, str):
        parser = lxml.etree.XMLParser(resolve_entities=False)
        dom_xml = lxml.etree.fromstring(dom_xml, parser)

    for elem in dom_xml.xpath("devices/disk"):
        if elem.get("device", None) != "disk":
            continue
        try:
            target_dev = elem.xpath("target")[0].get("dev")
        except IndexError:
            # disk node without a <target>: cannot match, skip it
            continue
        if target_dev == disk:
            return elem

    raise DiskNotFoundError(disk)
class DomainRunningError(Exception):
    """
    Domain is running when a task would need it to be shutdown
    """

    def __init__(self, domain):
        # Single literal instead of the accidental implicit concatenation,
        # with the grammar fixed ("need" -> "needs to be shut down").
        message = (
            "DomainRunningError: domain {} needs to be shut down to perform "
            "the task".format(domain)
        )
        super().__init__(message)
def list_backups_by_domain(backup_dir):
    """
    Group all available backups by domain, in a dict

    Backups have to respect the structure: backup_dir/domain_name/*backups*

    :param backup_dir: base directory holding one sub-directory per domain
    :returns: {domain_name: [(definition_path, definition_dict), …], …}
    :rtype: dict
    """
    return _list_json_following_pattern_by_domain(backup_dir, "*/*.json")
def _list_json_following_pattern_by_domain(directory, glob_pattern):
    """
    Group the json files matching a glob pattern by their domain name.

    :param directory: base directory in which the glob pattern is resolved
    :param glob_pattern: glob (relative to directory) matching the json
                         files to load
    :returns: {domain_name: [(json_path, metadata_dict), …], …}
    """
    backups = {}
    for json_file in glob.glob(os.path.join(directory, glob_pattern)):
        logger.debug("{} detected".format(json_file))
        try:
            with open(json_file, "r") as definition_file:
                try:
                    metadata = json.load(definition_file)
                except Exception as e:
                    # unreadable or invalid json: ignore this file
                    logger.debug("Error for file {}: {}".format(json_file, e))
                    continue
        except FileNotFoundError as e:
            # the file disappeared between the glob and the open
            logger.warning(f"File not found or already removed: {e}")
            continue
        try:
            domain_name = metadata["domain_name"]
        except KeyError:
            # Robustness fix: a metadata file without "domain_name" used to
            # raise an uncaught KeyError and abort the whole listing; skip
            # only the offending file instead.
            logger.debug("No domain_name in metadata file {}".format(json_file))
            continue
        backups.setdefault(domain_name, []).append((json_file, metadata))
    return backups
Required if conn is set 72 | """ 73 | 74 | def build(name, properties): 75 | attrs = {} 76 | attrs["hosts"] = [] 77 | for host in properties.get("hosts", []): 78 | if isinstance(host, str): 79 | attrs["hosts"].append(host) 80 | else: 81 | try: 82 | attrs["hosts"].append(host["host"]) 83 | except KeyError as e: 84 | logger.error( 85 | "Configuration error, missing host for lines: \n" 86 | "{}".format(host) 87 | ) 88 | raise e 89 | 90 | if properties.get("target", None): 91 | attrs["backup_dir"] = properties["target"] 92 | 93 | complete_backup_group = CompleteBackupGroup( 94 | name=name, conn=conn, callbacks_registrer=callbacks_registrer, **attrs 95 | ) 96 | return complete_backup_group 97 | 98 | for group_name, group_properties in groups_dict.items(): 99 | yield build(group_name, group_properties) 100 | 101 | 102 | class CompleteBackupGroup: 103 | """ 104 | Group of complete libvirt domain backups 105 | """ 106 | 107 | def __init__( 108 | self, 109 | name="unnamed", 110 | backup_dir=None, 111 | hosts=None, 112 | conn=None, 113 | backups=None, 114 | broken_backups=None, 115 | callbacks_registrer=None, 116 | ): 117 | #: dict of domains and their backups (CompleteDomBackup) 118 | self.backups = backups or dict() 119 | 120 | #: dict of domains and their broken/aborted backups (DomBackup) 121 | self.broken_backups = broken_backups or dict() 122 | 123 | #: hosts_patterns 124 | self.hosts = hosts or [] 125 | 126 | self.name = name 127 | 128 | #: base backup directory 129 | self.backup_dir = backup_dir 130 | 131 | #: connection to libvirt 132 | self.conn = conn 133 | 134 | #: callbacks registrer, used to clean broken backups. Needed if 135 | # self.conn is set. 
136 | self._callbacks_registrer = callbacks_registrer 137 | 138 | if self.conn and not self._callbacks_registrer: 139 | raise AttributeError("callbacks_registrer needed if conn is given") 140 | 141 | def scan_backup_dir(self): 142 | if not self.backup_dir: 143 | raise NotADirectoryError("backup_dir not defined") 144 | 145 | self._build_backups() 146 | if self.conn: 147 | self._build_broken_backups() 148 | else: 149 | logger.debug( 150 | "No libvirt connection for group {}, does not scan for " 151 | "possible broken backups.".format(self.conn) 152 | ) 153 | 154 | def _build_backups(self): 155 | backups = {} 156 | backups_by_domain = list_backups_by_domain(self.backup_dir) 157 | domains_to_include = domains_matching_with_patterns( 158 | backups_by_domain.keys(), self.hosts 159 | ) 160 | for dom_name in domains_to_include: 161 | backups[dom_name] = sorted( 162 | ( 163 | build_dom_complete_backup_from_def( 164 | definition, 165 | backup_dir=os.path.dirname(definition_filename), 166 | definition_filename=definition_filename, 167 | ) 168 | for definition_filename, definition in backups_by_domain[dom_name] 169 | ), 170 | key=lambda b: b.date, 171 | ) 172 | 173 | self.backups = backups 174 | 175 | def _build_broken_backups(self): 176 | broken_backups = {} 177 | broken_backups_by_domain = list_broken_backups_by_domain(self.backup_dir) 178 | domains_to_include = domains_matching_with_patterns( 179 | broken_backups_by_domain.keys(), self.hosts 180 | ) 181 | for dom_name in domains_to_include: 182 | broken_backups[dom_name] = sorted( 183 | ( 184 | build_dom_backup_from_pending_info( 185 | pending_info, 186 | backup_dir=os.path.dirname(pending_info_json), 187 | conn=self.conn, 188 | callbacks_registrer=self._callbacks_registrer, 189 | ) 190 | for pending_info_json, pending_info in broken_backups_by_domain[ 191 | dom_name 192 | ] 193 | ), 194 | key=lambda b: b.pending_info.get("date", None), 195 | ) 196 | 197 | self.broken_backups = broken_backups 198 | 199 | def 
get_backup_at_date(self, domain_name, date): 200 | try: 201 | backups = self.backups[domain_name] 202 | except KeyError: 203 | raise DomainNotFoundError(domain_name) 204 | 205 | for b in backups: 206 | if b.date == date: 207 | return b 208 | 209 | raise BackupNotFoundError 210 | 211 | def get_n_nearest_backup(self, domain_name, date, n): 212 | try: 213 | backups = self.backups[domain_name] 214 | except KeyError: 215 | raise DomainNotFoundError(domain_name) 216 | 217 | diff_list = sorted(backups, key=lambda b: abs(b.date - date)) 218 | 219 | return diff_list[:n] if diff_list else None 220 | 221 | def clean(self, hourly=5, daily=5, weekly=5, monthly=5, yearly=5): 222 | backups_removed = set() 223 | for domain, domain_backups in self.backups.items(): 224 | domain_backups = sorted(domain_backups, key=lambda b: b.date) 225 | keep_backups = set() 226 | 227 | keep_backups.update( 228 | self._keep_n_periodic_backups(domain_backups, "hour", hourly), 229 | self._keep_n_periodic_backups(domain_backups, "day", daily), 230 | self._keep_n_periodic_backups(domain_backups, "week", weekly), 231 | self._keep_n_periodic_backups(domain_backups, "month", monthly), 232 | self._keep_n_periodic_backups(domain_backups, "year", yearly), 233 | ) 234 | 235 | backups_to_remove = set(domain_backups).difference(keep_backups) 236 | for b in backups_to_remove: 237 | logger.info("Cleaning backup {} for domain {}".format(b.date, domain)) 238 | b.delete() 239 | self.backups[domain].remove(b) 240 | backups_removed.add(b) 241 | 242 | return backups_removed 243 | 244 | def clean_broken_backups(self): 245 | backups_removed = set() 246 | for domain, backups in self.broken_backups.items(): 247 | for backup in backups: 248 | backup.clean_aborted() 249 | self.broken_backups[domain].remove(backup) 250 | backups_removed.add(backup) 251 | 252 | return backups_removed 253 | 254 | def _keep_n_periodic_backups(self, sorted_backups, period, n): 255 | if not n: 256 | return [] 257 | 258 | grouped_backups = 
self._group_backup_by_period(sorted_backups, period) 259 | 260 | # will keep all yearly backups 261 | if n == "*": 262 | n = 0 263 | return set( 264 | backups[0] for group, backups in sorted(grouped_backups.items())[-n:] 265 | ) 266 | 267 | def _group_backup_by_period(self, sorted_backups, period): 268 | grouped_backups = defaultdict(list) 269 | periods = ("hour", "day", "week", "month", "year") 270 | for backup in sorted_backups: 271 | key = tuple( 272 | getattr(backup.date, p) 273 | for p in reversed(periods[periods.index(period) :]) 274 | ) 275 | grouped_backups[key].append(backup) 276 | return grouped_backups 277 | -------------------------------------------------------------------------------- /virt_backup/groups/pattern.py: -------------------------------------------------------------------------------- 1 | import libvirt 2 | import logging 3 | import re 4 | 5 | from virt_backup.domains import search_domains_regex 6 | 7 | 8 | logger = logging.getLogger("virt_backup") 9 | 10 | 11 | def matching_libvirt_domains_from_config(host, conn): 12 | """ 13 | Return matching domains with the host definition 14 | 15 | Will be mainly used by config, 16 | 17 | :param host: domain name or custom regex to match on multiple domains 18 | :param conn: connection with libvirt 19 | :returns {"domains": (domain_name, ), "exclude": bool, "properties": {}}: exclude 20 | will indicate if the domains need to be explicitly excluded of the backup 21 | group or not (for example, if a user wants to exclude all domains 22 | starting by a certain pattern). 
def pattern_matching_domains_in_libvirt(pattern, conn):
    """
    Parse the host pattern as written in the config and find matching hosts

    :param pattern: pattern to match on one or several domain names
    :param conn: connection with libvirt
    :returns: {"domains": iterable of matching domain names,
               "exclude": True when these domains must be excluded}
    """
    # a leading "!" flags the matched domains for exclusion
    exclude, pattern = _handle_possible_exclusion_host_pattern(pattern)
    if pattern.startswith("r:"):
        # "r:" prefix: the rest of the pattern is a regex on domain names
        pattern = pattern[2:]
        domains = search_domains_regex(pattern, conn)
    elif pattern.startswith("g:"):
        # "g:" prefix: include the domains of another group
        domains = _include_group_domains(pattern)
    else:
        try:
            # will raise libvirt.libvirtError if the domain is not found
            conn.lookupByName(pattern)
            domains = (pattern,)
        except libvirt.libvirtError as e:
            logger.error(e)
            domains = tuple()

    return {"domains": domains, "exclude": exclude}
def is_domain_matching_with(domain_name, pattern):
    """
    Parse the host pattern as written in the config and check if the domain
    name matches

    :param domain_name: domain name
    :param pattern: pattern to match on
    :returns: {matches: bool, exclude: bool}
    """
    # a leading "!" flags the pattern as an exclusion pattern
    exclude = pattern.startswith("!")
    if exclude:
        pattern = pattern[1:]

    if pattern.startswith("r:"):
        # regex pattern on the domain name
        matches = re.match(pattern[2:], domain_name)
    elif pattern.startswith("g:"):
        # TODO: to implement
        matches = False
    else:
        matches = pattern == domain_name

    return {"matches": matches, "exclude": exclude}
def groups_from_dict(groups_dict, conn, callbacks_registrer):
    """
    Construct and yield BackupGroups from a dict (typically as stored in
    config)

    :param groups_dict: dict of groups properties (take a look at the
                        config syntax for more info)
    :param conn: connection with libvirt
    :param callbacks_registrer: handles the snapshot events of the created
                                groups
    """

    def build(name, properties):
        # Build one BackupGroup from its config entry: resolve the host
        # patterns into included/excluded domain names, then attach every
        # included domain to the group.
        hosts = properties.pop("hosts")
        include, exclude = [], []
        for host in hosts:
            # TODO: matching should not filter some options. A function should be done
            # here like the sanitize_properties to raise an error per host if the
            # configuration is invalid.
            matches = matching_libvirt_domains_from_config(host, conn)
            if not matches.get("domains", None):
                continue
            if matches["exclude"]:
                exclude += list(matches["domains"])
            else:
                matches.pop("exclude")
                include.append(matches)

        logger.debug("Include domains: {}".format(include))
        logger.debug("Exclude domains: {}".format(exclude))

        sanitize_properties(properties)

        backup_group = BackupGroup(
            name=name, conn=conn, callbacks_registrer=callbacks_registrer, **properties
        )
        for i in include:
            for domain_name in i["domains"]:
                # exclusion patterns win over inclusion patterns
                if domain_name not in exclude:
                    domain = conn.lookupByName(domain_name)
                    sanitize_domain_properties(i["properties"])
                    backup_group.add_domain(
                        domain,
                        i["properties"].get("disks", ()),
                        quiesce=i["properties"].get("quiesce"),
                    )

        return backup_group

    def sanitize_properties(properties):
        # replace some properties by the correct ones
        if properties.get("target", None):
            properties["backup_dir"] = properties.pop("target")
        elif properties.get("target_dir", None):
            properties["backup_dir"] = properties.pop("target_dir")

        # pop params related to complete groups only
        for prop in ("hourly", "daily", "weekly", "monthly", "yearly"):
            try:
                properties.pop(prop)
            except KeyError:
                continue

        return properties

    def sanitize_domain_properties(properties):
        # sort the disks so each domain gets a deterministic disk order
        if properties.get("disks"):
            properties["disks"] = sorted(properties["disks"])

        return properties

    for group_name, group_properties in groups_dict.items():
        # work on a copy so the caller's config dict is left untouched
        yield build(group_name, group_properties.copy())
            If specified, has to be a
            dict, where key would be the domain to backup, and value
            an iterable containing the disks name to backup. Value
            could be None
        :param autostart: whether the main function should start this group
            automatically
        :param default_bak_param: default attributes applied to every new
            DomBackup of this group
        """
        #: list of DomBackup
        self.backups = list()

        #: group name, "unnamed" by default
        self.name = name

        #: does this group have to be autostarted from the main function or not
        self.autostart = autostart

        #: default attributes for new created domain backups. Keys and values
        # correspond to what a DomBackup object expect as attributes
        self.default_bak_param = default_bak_param

        if domlst:
            for bak_item in domlst:
                try:
                    # item is a (domain, disks) pair
                    dom, disks = bak_item
                except TypeError:
                    # bare domain: no specific disks selected
                    dom, disks = (bak_item, ())
                self.add_domain(dom, disks)

    def add_domain(self, dom, disks=(), quiesce=None):
        """
        Add a domain and disks to backup in this group

        If a backup already exists for the domain, will add the disks to the
        first backup found

        :param dom: dom to backup
        :param disks: disks to backup and attached to dom
        :param quiesce: if not None, overrides the group default quiesce
            attribute for this domain only
        """
        try:
            # if a backup of `dom` already exists, add the disks to the first
            # backup found
            existing_bak = next(self.search(dom))
            existing_bak.add_disks(*disks)
        except StopIteration:
            # spawn a new DomBackup instance otherwise
            kwargs = self.default_bak_param.copy()
            if quiesce is not None:
                kwargs["quiesce"] = quiesce

            self.backups.append(DomBackup(dom=dom, dev_disks=disks, **kwargs))

    def add_dombackup(self, dombackup):
        """
        Add a DomBackup to this group

        If a backup already exists for the same domain with the same
        properties, will add the disks to the first backup found

        :param dombackup: dombackup to add
        """
        for existing_bak in self.search(dombackup.dom):
            if existing_bak.compatible_with(dombackup):
                existing_bak.merge_with(dombackup)
                return
        else:
            # for/else: no compatible backup found, register it as a new one.
            self.backups.append(dombackup)

    def search(self, dom):
        """
        Search for a domain

        :param dom: domain to search the associated DomBackup object.
            libvirt.virDomain object
        :returns: a generator of DomBackup matching
        """
        for backup in self.backups:
            # Compare by libvirt UUID rather than by object identity.
            if backup.dom.UUID() == dom.UUID():
                yield backup

    def propagate_default_backup_attr(self):
        """
        Propagate default backup attributes to all attached backups
        """
        for backup in self.backups:
            for attr, val in self.default_bak_param.items():
                setattr(backup, attr, val)

    def start(self):
        """
        Start to backup all DomBackup objects attached

        :returns results: dictionary of domain names and their backup
        :raises BackupsFailureInGroupError: if at least one backup failed;
            carries both the completed and the failed backups
        """
        completed_backups = {}
        error_backups = {}

        for b in self.backups:
            dom_name = b.dom.name()
            try:
                completed_backups[dom_name] = self._start_backup(b)
            except KeyboardInterrupt:
                # a user interruption aborts the whole group
                raise
            except Exception as e:
                # keep backing up the other domains, report failures at the end
                error_backups[dom_name] = e
                logger.error("Error with domain %s: %s", dom_name, e)
                logger.exception(e)

        if error_backups:
            raise BackupsFailureInGroupError(completed_backups, error_backups)
        else:
            return completed_backups

    def start_multithread(self, nb_threads=None):
        """
        Start all backups, multi threaded

        It is wanted to avoid running multiple backups on the same domain (if
        the target dir is different for 2 backups of the same domain, for
        example), because of the way backups are done. An external snapshot is
        created then removed, backups would copy the external snapshot of other
        running backups instead of the real disk.
217 | 218 | To avoid this issue, a callback is set for each futures in order to 219 | notify when they are completed, and put the completed domain in a 220 | queue. 221 | If no other backup is to do for this domain, it will be dropped, 222 | otherwise a backup targeting this domain will be started. 223 | """ 224 | nb_threads = nb_threads or multiprocessing.cpu_count() 225 | 226 | backups_by_domain = self._group_backups_by_domain() 227 | 228 | completed_backups = {} 229 | error_backups = {} 230 | 231 | completed_doms = [] 232 | futures = {} 233 | try: 234 | with concurrent.futures.ThreadPoolExecutor(nb_threads) as executor: 235 | for backups_for_domain in backups_by_domain.values(): 236 | backup = backups_for_domain.pop() 237 | future = self._submit_backup_future( 238 | executor, backup, completed_doms 239 | ) 240 | futures[future] = backup 241 | 242 | while len(futures) < len(self.backups): 243 | next(concurrent.futures.as_completed(futures)) 244 | dom = completed_doms.pop().dom 245 | if backups_by_domain.get(dom): 246 | backup = backups_by_domain[dom].pop() 247 | future = self._submit_backup_future( 248 | executor, backup, completed_doms 249 | ) 250 | futures[future] = backup 251 | 252 | for f in concurrent.futures.as_completed(futures): 253 | dom_name = futures[f].dom.name() 254 | try: 255 | completed_backups[dom_name] = f.result() 256 | except KeyboardInterrupt: 257 | raise 258 | except Exception as e: 259 | error_backups[dom_name] = e 260 | logger.error("Error with domain %s: %s", dom_name, e) 261 | logger.exception(e) 262 | except: 263 | for f in futures: 264 | f.cancel() 265 | for f in futures: 266 | if f.running(): 267 | logger.info("Cancel backup for domain %s", futures[f].dom.name()) 268 | futures[f].cancel() 269 | 270 | concurrent.futures.wait(futures) 271 | raise 272 | 273 | if error_backups: 274 | raise BackupsFailureInGroupError(completed_backups, error_backups) 275 | else: 276 | return completed_backups 277 | 278 | def _group_backups_by_domain(self): 
279 | backups_by_domain = defaultdict(list) 280 | for b in self.backups: 281 | backups_by_domain[b.dom].append(b) 282 | 283 | return backups_by_domain 284 | 285 | def _submit_backup_future(self, executor, backup, completed_doms: list): 286 | """ 287 | :param completed_doms: list where a completed backup will append its 288 | domain. 289 | """ 290 | future = executor.submit(self._start_backup, backup) 291 | future.add_done_callback(lambda *args: completed_doms.append(backup.dom)) 292 | 293 | return future 294 | 295 | def _start_backup(self, backup): 296 | self._ensure_backup_is_set_in_domain_dir(backup) 297 | return backup.start() 298 | 299 | def _ensure_backup_is_set_in_domain_dir(self, dombackup): 300 | """ 301 | Ensure that a dombackup is set to be in a directory having the name of 302 | the related Domain 303 | """ 304 | if not dombackup.backup_dir: 305 | return 306 | 307 | if os.path.dirname(dombackup.backup_dir) != dombackup.dom.name(): 308 | dombackup.backup_dir = os.path.join( 309 | dombackup.backup_dir, dombackup.dom.name() 310 | ) 311 | -------------------------------------------------------------------------------- /virt_backup/tools.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import shutil 4 | 5 | 6 | def copy_file(src, dst, buffersize=None): 7 | if not os.path.exists(dst) and dst.endswith("/"): 8 | os.makedirs(dst) 9 | if os.path.isdir(dst): 10 | dst = os.path.join(dst, os.path.basename(src)) 11 | 12 | with open(src, "rb") as fsrc, open(dst, "wb") as fdst: 13 | shutil.copyfileobj(fsrc, fdst, buffersize) 14 | return dst 15 | 16 | 17 | class InfoFilter(logging.Filter): 18 | def filter(self, record): 19 | return record.levelno in (logging.DEBUG, logging.INFO) 20 | --------------------------------------------------------------------------------