├── .github └── workflows │ ├── release.yml │ └── tests.yml ├── .gitignore ├── CHANGELOG.md ├── README.md ├── pyproject.toml ├── requirements-dev.txt ├── requirements.txt ├── setup.cfg ├── setup.py └── src ├── pvecontrol ├── __init__.py ├── __main__.py ├── actions │ ├── __init__.py │ ├── cluster.py │ ├── node.py │ ├── storage.py │ ├── task.py │ └── vm.py ├── cli.py ├── config.py ├── config_default.yaml ├── models │ ├── __init__.py │ ├── backup_job.py │ ├── cluster.py │ ├── node.py │ ├── storage.py │ ├── task.py │ ├── vm.py │ └── volume.py ├── sanitycheck │ ├── __init__.py │ ├── checks.py │ ├── sanitychecks.py │ └── tests │ │ ├── __init__.py │ │ ├── ha_groups.py │ │ ├── ha_vms.py │ │ ├── nodes.py │ │ ├── vm.py │ │ └── vm_backups.py └── utils.py └── tests ├── __init__.py ├── fixtures └── api.py ├── sanitycheck ├── __init__.py ├── test_vm_backups.py ├── test_vm_disks.py └── utils.py ├── test_backup_job.py ├── test_cluster.py ├── test_pvecontrol.py ├── test_utils.py └── testcase.py /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release New version 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | # Push new release using semantic-release 8 | release: 9 | runs-on: ubuntu-latest 10 | if: github.ref == 'refs/heads/main' 11 | concurrency: release 12 | environment: release 13 | permissions: 14 | id-token: write 15 | contents: write 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | 22 | - name: Python Semantic Release 23 | uses: python-semantic-release/python-semantic-release@master 24 | with: 25 | github_token: ${{ secrets.GITHUB_TOKEN }} 26 | 27 | - name: Set up Python 28 | uses: actions/setup-python@v5 29 | with: 30 | python-version: "3.10" 31 | - name: Install dependencies 32 | run: | 33 | python -m pip install --upgrade pip 34 | pip install build 35 | - name: Build package 36 | run: python -m build 37 | - name: Publish package 38 | uses: pypa/gh-action-pypi-publish@release/v1 39 | with: 40 | user: __token__ 41 | password: ${{ secrets.PYPI_API_TOKEN }} 42 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: Tests 5 | 6 | on: 7 | push: 8 | branches: [ "main" ] 9 | pull_request: 10 | 11 | jobs: 12 | # Run tests for code formatting 13 | black: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Set up Python 20 | uses: actions/setup-python@v5 21 | with: 22 | python-version: '3.10' 23 | cache: 'pip' 24 | cache-dependency-path: '**/requirements*.txt' 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install -r requirements-dev.txt 29 | - name: Run black 30 | run: black .
--check --diff 31 | 32 | # Run tests for lint 33 | pylint: 34 | runs-on: ubuntu-latest 35 | 36 | steps: 37 | - uses: actions/checkout@v4 38 | 39 | - name: Set up Python 40 | uses: actions/setup-python@v5 41 | with: 42 | python-version: '3.10' 43 | cache: 'pip' 44 | cache-dependency-path: '**/requirements*.txt' 45 | - name: Install dependencies 46 | run: | 47 | python -m pip install --upgrade pip 48 | pip install -r requirements.txt -r requirements-dev.txt 49 | - name: Run pylint 50 | run: pylint src/ 51 | 52 | # Run tests 53 | code: 54 | runs-on: ubuntu-latest 55 | 56 | strategy: 57 | fail-fast: false 58 | matrix: 59 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] 60 | 61 | steps: 62 | - uses: actions/checkout@v4 63 | 64 | - name: Set up Python ${{ matrix.python-version }} 65 | uses: actions/setup-python@v5 66 | with: 67 | python-version: ${{ matrix.python-version }} 68 | cache: 'pip' 69 | cache-dependency-path: '**/requirements*.txt' 70 | - name: Install dependencies 71 | run: | 72 | python -m pip install --upgrade pip 73 | pip install . -r requirements.txt -r requirements-dev.txt 74 | - name: Test with pytest 75 | run: | 76 | pytest 77 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | *.swp 40 | *.~ 41 | .env 42 | .vscode/ 43 | 44 | .pytest* 45 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | 4 | ## v0.6.0 (2025-05-12) 5 | 6 | ### Bug Fixes 7 | 8 | - Bad formatting in "no such cluster" log 9 | ([`8d333d1`](https://github.com/enix/pvecontrol/commit/8d333d1fa200829bb73c436dcfdd26c25f7d3b3f)) 10 | 11 | - Enable ssl_verification from config, catch failures 12 | ([`c82c9e4`](https://github.com/enix/pvecontrol/commit/c82c9e4b140f56d5be6dc4d694faff8181d256e8)) 13 | 14 | - Exit 130 without printing stacktrace on sigint 15 | ([`bffe5d1`](https://github.com/enix/pvecontrol/commit/bffe5d1b9e432f6047627913a0c4d9d9ab28102a)) 16 | 17 | - Get version with help command, README typos 18 | ([`d4ac84f`](https://github.com/enix/pvecontrol/commit/d4ac84f35a5e111002393ebe700a6320911bc15f)) 19 | 20 | - Rebase click changes, more stuffs broken with json/yaml translator 21 | ([`fbab03a`](https://github.com/enix/pvecontrol/commit/fbab03aecd7a4af7735ea76ae70927b80da71406)) 22 | 23 | - Rm duped main (pip should gen the executable script) 24 | ([`a5fb557`](https://github.com/enix/pvecontrol/commit/a5fb557e453894815c992d743e53312cecf4d144)) 25 | 26 | - Set default memoryminimum to 8GB (8589934592 instead of 81928589934592) 27 | ([`c22ec97`](https://github.com/enix/pvecontrol/commit/c22ec977868d42e1590fc56f8c1b482e37743ff0)) 28 | 29 | The value was almost good, but prefixed with 8192, which is almost good also. But all together, it 30 | wasn't good at all. 31 | 32 | - Set default when assigning resource_* prop (KeyError on faulty nodes) 33 | ([`53e2c2e`](https://github.com/enix/pvecontrol/commit/53e2c2ebd17dcda0fc4913342a5137f5d8f11c6a)) 34 | 35 | - Typo, incorrect status ref 36 | ([`afab5ae`](https://github.com/enix/pvecontrol/commit/afab5ae7a2608a1421d5fa74cfb09cbe3c6b8f7d)) 37 | 38 | - Use env in she-bangs 39 | ([`de6a25d`](https://github.com/enix/pvecontrol/commit/de6a25d20264ffed1390bcb988e60e73f363c00b)) 40 | 41 | - **config**: Set default timeout to 60 42 | ([`13a58ad`](https://github.com/enix/pvecontrol/commit/13a58ad490a89463ef85ec79e966a718f4c3f239)) 43 | 44 | - **sanitycheck**: Correct disk unused on check 45 | ([`ee32fc1`](https://github.com/enix/pvecontrol/commit/ee32fc1e6feb53fd05835c6999a0c2fa8f9790de)) 46 | 47 | - **sanitycheck**: Fix wording in backup messages 48 | ([`3e599e1`](https://github.com/enix/pvecontrol/commit/3e599e1071e5024bbc4cebae71e993a1e28ea4d4)) 49 | 50 | - **sanitycheck**: Patch VM startonboot option check 51 | ([`1ded8c4`](https://github.com/enix/pvecontrol/commit/1ded8c424ef24901399ad33d8b5255d52e3b6f20)) 52 | 53 | - **sanitycheck**: Reduce complexity on VMStartOnBoot check 54 | ([`591c60f`](https://github.com/enix/pvecontrol/commit/591c60f32da2bcdd970a220a324182dafda1ebc7)) 55 | 56 | - **sanitycheck**: Warn when vms don't have any backup 57 | ([`b86ea89`](https://github.com/enix/pvecontrol/commit/b86ea8955b4f26e219689c152ddbc0787fca742d)) 58 | 59 | - **sanitycheck**: Wording 60 | ([`fb10b29`](https://github.com/enix/pvecontrol/commit/fb10b292e484e3d070f8bf8372fb398e77aec4fd)) 61 | 62 | - **test**: Correct code repetition 63 | ([`f8f9625`](https://github.com/enix/pvecontrol/commit/f8f96253aa68f37413a94c89a2a491c01f0bc61f)) 64 | 65 | - **vm**: Add tags as vm columns 66 | 
([`6a48132`](https://github.com/enix/pvecontrol/commit/6a48132123c8ffa677dceda56d07da0f25d43fbb)) 67 | 68 | - **vm**: Correctly parse tag list 69 | ([`c6f7e78`](https://github.com/enix/pvecontrol/commit/c6f7e7842677ca941409d732b343540b507ac35f)) 70 | 71 | - **vm**: Get_backup_jobs when selection mode is "all" or "exclude selected" and filter by node 72 | ([`b10ba25`](https://github.com/enix/pvecontrol/commit/b10ba25a410eb8c7b219528dfe8ef8f08a4c4abf)) 73 | 74 | - **vm**: Get_backup_jobs when selection mode is "pool based" 75 | ([`0cf55ef`](https://github.com/enix/pvecontrol/commit/0cf55efa65926cbb38ccb3db8173a96743a641b7)) 76 | 77 | - **vmstatus**: Add new VmStatus 78 | ([`fa20415`](https://github.com/enix/pvecontrol/commit/fa2041543dc1284652aaa3d17d8554601823ec37)) 79 | 80 | ### Build System 81 | 82 | - Trigger release manually from main branch only 83 | ([`5ff5bbd`](https://github.com/enix/pvecontrol/commit/5ff5bbda9e9bad6fc149b966434eb3559424153f)) 84 | 85 | - **ci**: Enable pip dependencies caching 86 | ([`7e9a391`](https://github.com/enix/pvecontrol/commit/7e9a3918cf51b71838296a6e8cf3587408b22da3)) 87 | 88 | - **ci**: Fetch with default depth (1 commit) instead of the whole history 89 | ([`f569685`](https://github.com/enix/pvecontrol/commit/f56968528cf263b0fff35e2baa981600a6370451)) 90 | 91 | - **ci**: Install pvecontrol package before running tests 92 | ([`949a85e`](https://github.com/enix/pvecontrol/commit/949a85ea258d0eac6821a9b3b131787ed9d453d2)) 93 | 94 | - **ci**: Remove flake8 linting and redundant pytest job 95 | ([`8c41a46`](https://github.com/enix/pvecontrol/commit/8c41a46e923559e846ffc0c2acb0ae9592ff81e8)) 96 | 97 | - **ci**: Trigger tests on pull requests but not on push 98 | ([`a04dc02`](https://github.com/enix/pvecontrol/commit/a04dc02d07c8e59c59c87ddc253272588ff86b73)) 99 | 100 | - **ci**: Upgrade actions versions 101 | ([`b72f95a`](https://github.com/enix/pvecontrol/commit/b72f95a9dfe5e5266a7df37c3306c1003102bd76)) 102 | 103 | ### Chores 104 | 105 | - Drop python-3.8 support, enable python-3.13 106 | ([`df51da3`](https://github.com/enix/pvecontrol/commit/df51da3e809152c0b1eee312e19a445b406332ff)) 107 | 108 | - Enable 'diff' option with black formatter 109 | ([`9d1edd5`](https://github.com/enix/pvecontrol/commit/9d1edd53c23ec8cb368b4fdcd81373dd7f3efdf8)) 110 | 111 | - Move root models to a dedicated folder 112 | ([`71fdc63`](https://github.com/enix/pvecontrol/commit/71fdc631124d74dc6beea022645ba14df322219d)) 113 | 114 | - **black**: Fix syntax 115 | ([`394b83e`](https://github.com/enix/pvecontrol/commit/394b83e21c28a2428ad45d2294aaeb3e3a104ee1)) 116 | 117 | - **cluster**: Add some logging around backups 118 | ([`ff863f8`](https://github.com/enix/pvecontrol/commit/ff863f8298aa7a99ee81cb34d3eccc7b39a386a4)) 119 | 120 | - **README**: Update fish completion documentation 121 | ([`c68bb6c`](https://github.com/enix/pvecontrol/commit/c68bb6cf6ed270871ea01fa88f5e869eafb5d90e)) 122 | 123 | ### Documentation 124 | 125 | - A command in README was using the legacy syntax 126 | ([`25cb79e`](https://github.com/enix/pvecontrol/commit/25cb79e47728810467feb6061f9cf4425383029b)) 127 | 128 | ### Features 129 | 130 | - Add a markdown outputFormat 131 | ([`fd9613f`](https://github.com/enix/pvecontrol/commit/fd9613f3743957babf744296ab061116ba316315)) 132 | 133 | - Add api timeout configuration 134 | ([`d2291ff`](https://github.com/enix/pvecontrol/commit/d2291ff8b3b3a238d92f239be1b57f2d9e1c24e8)) 135 | 136 | - Add list of available checks in sanitycheck --help 137 | 
([`202eff5`](https://github.com/enix/pvecontrol/commit/202eff5bc19435f0c879982408197658f6cf20e3)) 138 | 139 | - Allow sorting and filtering on hidden columns 140 | ([`ce59a7a`](https://github.com/enix/pvecontrol/commit/ce59a7a764f9f84672162aa3388700f739f11022)) 141 | 142 | In render_output, we stopped using `filter_keys`, in order to be able to sort and filter on hidden 143 | columns as well. Instead, we use `reorder_keys` because filter_keys used to reorder keys and we 144 | want the columns to appears in the order given by the arguments. Last but not least: in 145 | `add_list_resource_command` we actually reimplement `filter_keys` in order to remove columns that 146 | should never be rendered (node._api for instance). First because we don't want users to display 147 | them, and second because PrettyTable take in account the rendering of hidden columns to calculate 148 | the height of each cells, which would produce very high columns in some cases. 149 | 150 | - Allow to configure proxy certificate and key with one unique command 151 | ([`3b2571d`](https://github.com/enix/pvecontrol/commit/3b2571da998c38fa629763163eb91899a1a58edb)) 152 | 153 | - Enable outputFormats (yaml, json) for clusterstatus 154 | ([`eb9262a`](https://github.com/enix/pvecontrol/commit/eb9262ae02dd01684675bce88e4612500f180f0f)) 155 | 156 | - Support for proxmox clusters behind a reverse proxy with certificate-based authentication 157 | ([`902c759`](https://github.com/enix/pvecontrol/commit/902c759a1c31dd151df876169983312579dc3224)) 158 | 159 | - **cli**: Get --cluster flag from env 160 | ([`760c613`](https://github.com/enix/pvecontrol/commit/760c6135c72ecb2007ce7649ec9ace995a7c7526)) 161 | 162 | - **sanitycheck**: Check if disks are unused 163 | ([`d31752f`](https://github.com/enix/pvecontrol/commit/d31752f1420484515ba4a2a418ac5e716cdc6a07)) 164 | 165 | - **sanitycheck**: Check vms have a backup job configured 166 | ([`1b8617f`](https://github.com/enix/pvecontrol/commit/1b8617f9cf13aa0cc658376dfba0909e0b4c44d9)) 167 | 168 | - **sanitycheck**: Check vms have at least one recent backup 169 | ([`db42367`](https://github.com/enix/pvecontrol/commit/db42367a0290eccb4b40e53dcbe400ab56274b33)) 170 | 171 | - **sanitychecks**: Check onboot vm option 172 | ([`cda70f8`](https://github.com/enix/pvecontrol/commit/cda70f8c1a3e8ef8b04e6c083b19c8925a181cdc)) 173 | 174 | - **vm**: Parse tags as sets for easier usage 175 | ([`7dda4fe`](https://github.com/enix/pvecontrol/commit/7dda4fe3e19ec3ad3e461909a0b65f43accfcd6c)) 176 | 177 | ### Refactoring 178 | 179 | - Add PVEVolume and PVEBackupJob classes 180 | ([`f488dbe`](https://github.com/enix/pvecontrol/commit/f488dbe62a68042cfa0fb647a942152e50c41ae8)) 181 | 182 | - Create a click ResourceGroup class that automatically adds a list subcommand 183 | ([`592b6d0`](https://github.com/enix/pvecontrol/commit/592b6d0941a81106f7419a61be37fa28cca4218b)) 184 | 185 | - Move backups and backups_job listing in PVECluster and PVEVm 186 | ([`2cedc7d`](https://github.com/enix/pvecontrol/commit/2cedc7dc759601d4918bd51ed26eb0516836fe02)) 187 | 188 | - Move cli related functions from utils.py to cli.py 189 | ([`67cc27a`](https://github.com/enix/pvecontrol/commit/67cc27a2ade021ae696ccd49366992d54e6323d9)) 190 | 191 | - Move PVECluster.get_node_resources to PVENode.resources property 192 | ([`5f028ad`](https://github.com/enix/pvecontrol/commit/5f028adf0f71dc1a54dcf15d7b33f82d10f95fac)) 193 | 194 | - Remove dead code in Check 195 | 
([`2a24f61`](https://github.com/enix/pvecontrol/commit/2a24f6124ffc62bfd8f91467057d57d9ab753d28)) 196 | 197 | - Use property instead of method whenever possible in PVECluster and PVENode 198 | ([`34efd0c`](https://github.com/enix/pvecontrol/commit/34efd0c51cff3286f1d8f92784dfbbcac767cc57)) 199 | 200 | - **cli**: Show help instead of an error when no args is provided 201 | ([`4d2d474`](https://github.com/enix/pvecontrol/commit/4d2d474ea56da6b7f7c44e67b80e305bdc9b9ccd)) 202 | 203 | - **cli**: Split subcommands between name and verb and use click instead of argparse 204 | ([`38f28f9`](https://github.com/enix/pvecontrol/commit/38f28f9633a09f79c369b1f17791db2982c3566a)) 205 | 206 | Co-authored-by: Yoann Lamouroux 207 | 208 | - **sanitycheck**: Improve vm_backups wording 209 | ([`1ca8721`](https://github.com/enix/pvecontrol/commit/1ca8721b0d008e3b249a12be061585445d767d7d)) 210 | 211 | - **sanitycheck**: Use storage.images instead of storage.get_content("images") 212 | ([`aa6db96`](https://github.com/enix/pvecontrol/commit/aa6db969ab8fab661f939cfe60bd526ef0aae6e9)) 213 | 214 | ### Testing 215 | 216 | - **sanitycheck**: Vm backups 217 | ([`5e0fd4e`](https://github.com/enix/pvecontrol/commit/5e0fd4e9d6a5676470de8ee7513742bb9bfe475d)) 218 | 219 | 220 | ## v0.5.0 (2025-01-30) 221 | 222 | ### Bug Fixes 223 | 224 | - Cleanup code 225 | ([`992d6d2`](https://github.com/enix/pvecontrol/commit/992d6d266196b86a946ef2af275189de5013300b)) 226 | 227 | - Correct clusterconfig default value overwrite 228 | ([`aa30cbf`](https://github.com/enix/pvecontrol/commit/aa30cbf48f52cdb44b901dcf1f719943b7143d7a)) 229 | 230 | - Crash on sortby None (default value) 231 | ([`075761e`](https://github.com/enix/pvecontrol/commit/075761ecdb7a879197ade4c061e683c4b61f7622)) 232 | 233 | - Crash when filtering returns 0 results 234 | ([`57c6892`](https://github.com/enix/pvecontrol/commit/57c68929bc93191f1b1b17984623ea090dbb7d40)) 235 | 236 | - Harmonize logs when using --wait and --follow 237 | ([`81df66a`](https://github.com/enix/pvecontrol/commit/81df66aca8cf664046832849aa0ced1e275607b5)) 238 | 239 | - Missing method self._initstatus() in task.refresh() 240 | ([`a730fc4`](https://github.com/enix/pvecontrol/commit/a730fc4de142f0bb215da15f9b5fdb20827ab5a8)) 241 | 242 | - Patch clusterconfig never set to default config 243 | ([`9d02b41`](https://github.com/enix/pvecontrol/commit/9d02b41befd0b5da09a919db7a6bb116573dddf6)) 244 | 245 | - Strenum isn't available for python 3.10 246 | ([`29e44bd`](https://github.com/enix/pvecontrol/commit/29e44bdc31a75eafc30bfeb1ae13c1a92826d8c3)) 247 | 248 | - Transpose existing sanity checks to new class 249 | ([`368d868`](https://github.com/enix/pvecontrol/commit/368d8680928ed97286c0842b656bb39b67099939)) 250 | 251 | - **clusterstatus**: Update cli output 252 | ([`23d3aa7`](https://github.com/enix/pvecontrol/commit/23d3aa722f75d50e0c3ec86fec53580c082cd2a4)) 253 | 254 | - **config**: Correct PVECluster args node to config 255 | ([`833dd6e`](https://github.com/enix/pvecontrol/commit/833dd6ec51cdd664d2d5b160660097f19c3eb1ec)) 256 | 257 | - **config**: Correct PVECluster args node to config 258 | ([`e5367e8`](https://github.com/enix/pvecontrol/commit/e5367e8aa478bb3c350bcf3c58e7ca0aa4df1b8b)) 259 | 260 | - **config**: Rollback node_factors to node 261 | ([`8dffbc8`](https://github.com/enix/pvecontrol/commit/8dffbc897148375ec6ad83103135454cff66b116)) 262 | 263 | - **evacuate**: Change log output 264 | ([`c2c4fd8`](https://github.com/enix/pvecontrol/commit/c2c4fd8b6573044b149706161293150574aa9671)) 265 | 266 
| - **evacuate**: Make sure targets are unique 267 | ([`3627ef7`](https://github.com/enix/pvecontrol/commit/3627ef72658eca65c0587315bedd2fc1826b7ae0)) 268 | 269 | - **sanitycheck**: In VM config, cpu are not always return by API 270 | ([`842a444`](https://github.com/enix/pvecontrol/commit/842a44471dc7935ca73bf4e5cbb438d9eb0c21bf)) 271 | 272 | - **sanitycheck**: Patch error on if statements in ha_vms check 273 | ([`9030ffe`](https://github.com/enix/pvecontrol/commit/9030ffead30d8075598e7df19d09a3765ea96ebd)) 274 | 275 | - **sanitycheck**: Patch no checks append to sanity checks list 276 | ([`b280650`](https://github.com/enix/pvecontrol/commit/b280650174ac7f13cafee28a18d15e8888526bf3)) 277 | 278 | - **sanitycheck**: Verify check exists before trying to run it 279 | ([`1535772`](https://github.com/enix/pvecontrol/commit/15357720ae7a69b57b0ab8110955e67746df3999)) 280 | 281 | - **sanitychecks**: Add exitcode and correct message criticity code 282 | ([`ada9222`](https://github.com/enix/pvecontrol/commit/ada9222cf28516e8098cdd2713c7a159ac344704)) 283 | 284 | - **sanitychecks**: Add terminal supports verification (utf-8, bold, colors) 285 | ([`b37570a`](https://github.com/enix/pvecontrol/commit/b37570ac20a353f1f4a0a729f0b384ef614e3b6f)) 286 | 287 | - **sanitychecks**: Correct check code for ha_group 288 | ([`723a3e6`](https://github.com/enix/pvecontrol/commit/723a3e6f56a950d91e7f6724824576de08f388f4)) 289 | 290 | - **sanitychecks**: Patch display issues depending on terminal using curses 291 | ([`b8d1b04`](https://github.com/enix/pvecontrol/commit/b8d1b048faeeed73b54146d4dfa9cb8ae3ef3651)) 292 | 293 | - **sanitychecks**: Patch display issues depending on terminal using curses 294 | ([`b83e6e7`](https://github.com/enix/pvecontrol/commit/b83e6e798cacb641738b75c2c6986ccff1c0fb67)) 295 | 296 | - **sanitychecks**: Patch some issues 297 | ([`31ef892`](https://github.com/enix/pvecontrol/commit/31ef892d5ed2efbcdfd9943a8253bcff42db3c76)) 298 | 299 | - **sanitychecks**: Refacto Checks run with classes 300 | ([`2de9331`](https://github.com/enix/pvecontrol/commit/2de933116ab5e08902dfb5c395f05ced91782443)) 301 | 302 | - **storage**: Patch error on PVEStorage.__str__ 303 | ([`8a83370`](https://github.com/enix/pvecontrol/commit/8a83370655ea10fd8363b9dc3377feda6766cdc7)) 304 | 305 | - **storagelist**: Add sort-by arg 306 | ([`0da34d4`](https://github.com/enix/pvecontrol/commit/0da34d4f9ae125dc2f3b18588feab9dfab9120bd)) 307 | 308 | - **storagelist**: Correct shared col 309 | ([`0d78b7f`](https://github.com/enix/pvecontrol/commit/0d78b7f153b1098e2ef2c891fd8f0ed08cd15496)) 310 | 311 | - **storagelist**: Prototype of print_tableoutput has changed 312 | ([`b5d4367`](https://github.com/enix/pvecontrol/commit/b5d43673b3d55502bb08ded47a325b2d423a8ae6)) 313 | 314 | - **storagelist**: Update PVEStorage kwargs loading 315 | ([`496118d`](https://github.com/enix/pvecontrol/commit/496118defc011b788b371d2a2452ddfafaf0659b)) 316 | 317 | - **tasks**: Nicely handle vanished tasks 318 | ([`64a1c4c`](https://github.com/enix/pvecontrol/commit/64a1c4c70c9ef08ad8fdb78be90f5126d34216d9)) 319 | 320 | Some tasks can deseappear from the API with time. So we must handle this case. 
321 | 322 | ### Chores 323 | 324 | - Add CI job for black and config in pyproject 325 | ([`d08192b`](https://github.com/enix/pvecontrol/commit/d08192babe9e6196b232922ea1f0c891c0232bbf)) 326 | 327 | - Fix ci cancelled jobs 328 | ([`61d6607`](https://github.com/enix/pvecontrol/commit/61d660763ef722d0d5ebaafbbee28d3cf7cc2868)) 329 | 330 | - Optimize cli by reducing HTTP calls 331 | ([`078c37d`](https://github.com/enix/pvecontrol/commit/078c37d698a222afff53493bf2d0fce7ec262c0f)) 332 | 333 | - Remove Github deployment in CI stage tests 334 | ([`5ee2257`](https://github.com/enix/pvecontrol/commit/5ee22570823172116a1d48526982418653661f85)) 335 | 336 | - Remove Github deployment in CI stage tests 337 | ([`a95bd61`](https://github.com/enix/pvecontrol/commit/a95bd614559d2a009d851418b4fec644e06e2c97)) 338 | 339 | - Run black 340 | ([`ef4be01`](https://github.com/enix/pvecontrol/commit/ef4be01563d4f7a6016b2c3d04a1dadf2a411efd)) 341 | 342 | - Run black 343 | ([`9aa757b`](https://github.com/enix/pvecontrol/commit/9aa757b54c0aa9d1e49887cd8d58c9e5b093ff6b)) 344 | 345 | - **auth**: Patch tests and lint 346 | ([`8b3d837`](https://github.com/enix/pvecontrol/commit/8b3d837b4a6ed65463686e8c4ddff888c98736dc)) 347 | 348 | - **black**: Correct style for sanitycheck 349 | ([`27b3c73`](https://github.com/enix/pvecontrol/commit/27b3c73c561d5e31dc27eb6b2697baa529e97151)) 350 | 351 | - **black**: Patch black warnings 352 | ([`bb17d7b`](https://github.com/enix/pvecontrol/commit/bb17d7b298341bfe47b94eb0f1a61ed895dcf619)) 353 | 354 | - **ci**: Fix CI execution for PRs 355 | ([`197c49f`](https://github.com/enix/pvecontrol/commit/197c49f35186c134f6e81a06f5ea8da2d1e71f32)) 356 | 357 | - **ci**: Update file requirements-dev.txt 358 | ([`13a540c`](https://github.com/enix/pvecontrol/commit/13a540cf0f18d4f38a202ed4ebc5f3e22878f551)) 359 | 360 | - **pylint**: Add CI job for pylint 361 | ([`579b436`](https://github.com/enix/pvecontrol/commit/579b4362f950fd5a8269499ee1907de24d3042c0)) 362 | 363 | - **pylint**: Init pylint refacto 364 | ([`2daa821`](https://github.com/enix/pvecontrol/commit/2daa82188abe6185bd1beeea7403a50551aa8a16)) 365 | 366 | - **pylint**: Patch last needed 367 | ([`0d87cde`](https://github.com/enix/pvecontrol/commit/0d87cdee065bdbe5cef0dd6bd7e39f5a26ab9678)) 368 | 369 | - **pylint**: Patch loop on pvecontrol module 370 | ([`a218711`](https://github.com/enix/pvecontrol/commit/a218711a7af675fed3a3b64a76544196af8b9d3c)) 371 | 372 | - **pylint**: Patch pvecontrol/actions/cluster.py 373 | ([`77725cb`](https://github.com/enix/pvecontrol/commit/77725cbf4460c8edda2189b209f859c7312dbc79)) 374 | 375 | - **pylint**: Patch pvecontrol/actions/storage.py 376 | ([`97a12c7`](https://github.com/enix/pvecontrol/commit/97a12c70d621574b58086178d041dbfb0b4e3469)) 377 | 378 | - **pylint**: Patch pvecontrol/actions/task.py 379 | ([`9332e6b`](https://github.com/enix/pvecontrol/commit/9332e6bf9a67ea74c771739d3a3020152fb83aa8)) 380 | 381 | - **pylint**: Patch pvecontrol/actions/vm.py 382 | ([`611afb7`](https://github.com/enix/pvecontrol/commit/611afb71a9b4632407d270eb92b9b6c8bcbe00f0)) 383 | 384 | - **pylint**: Patch pvecontrol/node.py 385 | ([`5f27f0b`](https://github.com/enix/pvecontrol/commit/5f27f0b989b2b4529f3879fea3151379e70c6ac5)) 386 | 387 | - **pylint**: Patch src/pvecontrol/cluster.py 388 | ([`17bb25e`](https://github.com/enix/pvecontrol/commit/17bb25e1b7d01b5c9c5608918738cc5869ff6796)) 389 | 390 | - **pylint**: Patch src/pvecontrol/storage.py 391 | 
([`363b079`](https://github.com/enix/pvecontrol/commit/363b079fbbea4a803a4cde86679d2e3fc4c4cd5e)) 392 | 393 | - **pylint**: Patch src/pvecontrol/utils.py 394 | ([`8b76209`](https://github.com/enix/pvecontrol/commit/8b76209d118ea7d91b377f237f6ae6d75972841a)) 395 | 396 | - **pylint**: Patch typo 397 | ([`8353a7e`](https://github.com/enix/pvecontrol/commit/8353a7ebe44e3d412e4b570e6d0d8655537833f7)) 398 | 399 | - **pylint**: Rebase to branch black 400 | ([`2c60f5e`](https://github.com/enix/pvecontrol/commit/2c60f5ee7d300f4fff0765f5d219d1bb6c71a01a)) 401 | 402 | - **pylint**: Remove unnecessary pylint comment 403 | ([`d902a2e`](https://github.com/enix/pvecontrol/commit/d902a2ef137a26292cf064cb0353f687e6ba33a8)) 404 | 405 | - **README**: Add documentation about shell auto completion 406 | ([`2695418`](https://github.com/enix/pvecontrol/commit/26954187421b3be93aa947e4cbade4dd00e6b3e9)) 407 | 408 | - **README**: Complete doc for release 409 | ([`c8143c2`](https://github.com/enix/pvecontrol/commit/c8143c28f0787c340a2f28b63464cf888752edb7)) 410 | 411 | * docs: update README 412 | 413 | * chore(README): Add token auth to documentation. 414 | 415 | * docs: merge my token auth docs 416 | 417 | --------- 418 | 419 | Co-authored-by: Laurent Corbes 420 | 421 | - **README**: Fix missing newline 422 | ([`d091473`](https://github.com/enix/pvecontrol/commit/d09147347dd76faf2594a090b8f3d0b8a15a6d92)) 423 | 424 | - **README**: Fix title 425 | ([`16eedc4`](https://github.com/enix/pvecontrol/commit/16eedc40c6798676363a21931ae7e928b1e8688d)) 426 | 427 | - **README**: With pylint modification dev command was updated 428 | ([`305759c`](https://github.com/enix/pvecontrol/commit/305759ce51961bde3dcdfb25265268f49d794c2c)) 429 | 430 | ### Features 431 | 432 | - --columns flag 433 | ([`2397102`](https://github.com/enix/pvecontrol/commit/239710200aabbdfb642104eee134be993f1d101b)) 434 | 435 | - Add --filter flag to node, task and vm 436 | ([`4dfbb52`](https://github.com/enix/pvecontrol/commit/4dfbb527593e5e84d66ec68723d268bc0fb977d4)) 437 | 438 | - Add --output option to list commands (supports text, json, csv and yaml) 439 | ([`c1ee523`](https://github.com/enix/pvecontrol/commit/c1ee5233034f86baa4c62acea9a0a16da9c69515)) 440 | 441 | - Add --sort-by flag 442 | ([`0a0cf4d`](https://github.com/enix/pvecontrol/commit/0a0cf4dc12103a00a74e1f5204afcad69922ae18)) 443 | 444 | - Add completion generation 445 | ([`3a38437`](https://github.com/enix/pvecontrol/commit/3a384374bf1dcd48abc7eccf1740170b1e7eea80)) 446 | 447 | - Add sanitycheck VM has HA disks 448 | ([`bcf535a`](https://github.com/enix/pvecontrol/commit/bcf535a174efee7d13242f80d31472174793f6d2)) 449 | 450 | - Add sanitycheck VM has HA disks 451 | ([`2722de8`](https://github.com/enix/pvecontrol/commit/2722de87f531da115e3a8a1ffdee2e409dabde90)) 452 | 453 | - Add shell-like globbing on nodeevacuate --target flag 454 | ([`d7b3393`](https://github.com/enix/pvecontrol/commit/d7b3393ace577c14ac24cfd72064b01e935d1fe9)) 455 | 456 | based on fnmatch.fnmatchcase from python stdlib 457 | 458 | - Add support for authentication tokens 459 | ([`7a913a8`](https://github.com/enix/pvecontrol/commit/7a913a8181383c016b4839088f0f74949a0e34f1)) 460 | 461 | - Columns name validation (--sort-by & --filter flags) 462 | ([`7cd0bdb`](https://github.com/enix/pvecontrol/commit/7cd0bdb94d41f600dc1ac8cca1b623715fa3204b)) 463 | 464 | - Implement cpufactor and memoryminimum by cluster 465 | ([`bab64bf`](https://github.com/enix/pvecontrol/commit/bab64bf302c82d7c73b24b7db2ecc11c55cff136)) 466 | 467 | - 
**auth**: Add some checks on token auth 468 | ([`4394cde`](https://github.com/enix/pvecontrol/commit/4394cdec2d140d2f03c2cc519241cb5182a9d870)) 469 | 470 | - **auth**: Allow command on user, password config attributes 471 | ([`c628f96`](https://github.com/enix/pvecontrol/commit/c628f96c7a4d860560f2c2be85820e46e3ce363a)) 472 | 473 | - **auth**: Allow command on user, password config attributes 474 | ([`ffe12dc`](https://github.com/enix/pvecontrol/commit/ffe12dc3a04bbf60bdf593754b06eb698a165968)) 475 | 476 | - **auth**: Allow command on user, password config attributes 477 | ([`92cf67d`](https://github.com/enix/pvecontrol/commit/92cf67d45cb316b981fcbb71b902bd5e5820e8d2)) 478 | 479 | - **auth**: Update README.md 480 | ([`01ca6fe`](https://github.com/enix/pvecontrol/commit/01ca6fecbd2fe8624111022fcdcb9c0fce728414)) 481 | 482 | - **node**: Nodeevacuate add --wait flag 483 | ([`edae9da`](https://github.com/enix/pvecontrol/commit/edae9da4db0d7678f85b6f9d1a165585fe76c936)) 484 | 485 | - **sanitycheck**: Check HA VM has cpu type != host 486 | ([`d5c7d1f`](https://github.com/enix/pvecontrol/commit/d5c7d1f0b7870818eed68aa334ad431af2cb17b2)) 487 | 488 | - **sanitycheck**: Rewrite logic to run tests 489 | ([`c8153c9`](https://github.com/enix/pvecontrol/commit/c8153c9fac5d991ff66aff909cbfe31dfd171eb9)) 490 | 491 | - **sanitychecks**: Add colors support on ASCII icons 492 | ([`e8097c9`](https://github.com/enix/pvecontrol/commit/e8097c92d72ea993fe99b915df6a0d726b3eaf1b)) 493 | 494 | - **storagelist**: Add missing --filter flag 495 | ([`4d644fd`](https://github.com/enix/pvecontrol/commit/4d644fde98e2f8fe260509bc0ecb5de0ece8e8ea)) 496 | 497 | - **storagelist**: Add storages list group shared by storage name 498 | ([`361fc39`](https://github.com/enix/pvecontrol/commit/361fc39ec95ecbfc581ad05d78e2ea210c121192)) 499 | 500 | - **tasks**: Taskget add --wait flag 501 | ([`011db78`](https://github.com/enix/pvecontrol/commit/011db785d9a47ff33a25ff02ba56c969f0b06a98)) 502 | 503 | - **vm**: Vmmigrate add --wait flag 504 | ([`8b8f1f1`](https://github.com/enix/pvecontrol/commit/8b8f1f1f8dd0f48a45516f41507b83a2f5963f5d)) 505 | 506 | ### Refactoring 507 | 508 | - Default values of PVE objects (node, vm & task) 509 | ([`4ca64ea`](https://github.com/enix/pvecontrol/commit/4ca64eaa4dfdce67c3d384901cd2db1a0e46e101)) 510 | 511 | - Move tests in src directory 512 | ([`98755da`](https://github.com/enix/pvecontrol/commit/98755da38f83ef3ef1daed953edfbdfa816699c2)) 513 | 514 | - Simplify print_task 515 | ([`c65104f`](https://github.com/enix/pvecontrol/commit/c65104fc30cba247f00bb218665ae47a498a753e)) 516 | 517 | 518 | ## v0.4.0 (2024-11-04) 519 | 520 | ### Bug Fixes 521 | 522 | - Patch test 523 | ([`902fd2a`](https://github.com/enix/pvecontrol/commit/902fd2ac90ffd5b2282a45094d1a6dcc32df9020)) 524 | 525 | - Pep8 compliance update private method 526 | ([`8d260b3`](https://github.com/enix/pvecontrol/commit/8d260b3c5530218c8e2a03ff514a35ba98709962)) 527 | 528 | ### Features 529 | 530 | - Split actions into mutliple files, add a config.py to resolve issue due to global validconfig 531 | ([`32f4534`](https://github.com/enix/pvecontrol/commit/32f4534aa8702a82d800d1f1e3e099cae11e762b)) 532 | 533 | 534 | ## v0.3.1 (2024-10-02) 535 | 536 | ### Bug Fixes 537 | 538 | - **package**: Fix module package and install 539 | ([`858d20f`](https://github.com/enix/pvecontrol/commit/858d20f6bde59b0140a1ca7dd68afd5818bfe05e)) 540 | 541 | ### Chores 542 | 543 | - **ci**: Update python version 544 | 
([`c79b5a3`](https://github.com/enix/pvecontrol/commit/c79b5a341acdf840824b3e74f836be80b265bc39)) 545 | 546 | Also update action versions Run ci on dev branch 547 | 548 | 549 | ## v0.3.0 (2024-10-01) 550 | 551 | ### Bug Fixes 552 | 553 | - **node**: Add cast on memory 554 | ([`2298276`](https://github.com/enix/pvecontrol/commit/229827651a99dd68838b91b5b0a05900ede37220)) 555 | 556 | - **node**: Linter output 557 | ([`5e0bea1`](https://github.com/enix/pvecontrol/commit/5e0bea113ba728b457ed0d1bd7626be22f907e66)) 558 | 559 | - **PVENode**: Fix issue with offline node 560 | ([`a128eb3`](https://github.com/enix/pvecontrol/commit/a128eb3be53071503cdd6769279021ec0bb64d12)) 561 | 562 | - **task**: Fix issue with status refresh 563 | ([`43c72c0`](https://github.com/enix/pvecontrol/commit/43c72c01fac3ea5b03d931d7b44d13c226012f15)) 564 | 565 | - **task**: Revert fix for not available node 566 | ([`3cbd35a`](https://github.com/enix/pvecontrol/commit/3cbd35a98504b61b79aaba817117c32e2093facd)) 567 | 568 | ### Chores 569 | 570 | - **pvecontrol**: Add some debug 571 | ([`21d332a`](https://github.com/enix/pvecontrol/commit/21d332af8f416c060654169b604ed1ed0b23c980)) 572 | 573 | - **README**: Update config 574 | ([`4f62fc7`](https://github.com/enix/pvecontrol/commit/4f62fc7af56da5aaa7bd3ed388e4b071c7bab9a7)) 575 | 576 | ### Features 577 | 578 | - **cluster**: Add refresh 579 | ([`6b50de5`](https://github.com/enix/pvecontrol/commit/6b50de5bd36becbface74638a6fb6f4345e074f4)) 580 | 581 | Allow to refresh all clusters objects 582 | 583 | - **global**: Complete rewrite into classes 584 | ([`119b01c`](https://github.com/enix/pvecontrol/commit/119b01cc42355d93db1f8c8afd77427ce1b7a5ab)) 585 | 586 | Create classes to manage PVECluster, PVENode and PVEVM objects. This will allow lot more simple 587 | operations now. 588 | 589 | - **pvecontrol**: Add nodeevacuate feature 590 | ([`206dc83`](https://github.com/enix/pvecontrol/commit/206dc8322cf8fc7eb0bae3c3385fef4bd7f8a7ea)) 591 | 592 | This fonction allow to automatically migrate out all the VMs from a node to another ones. 
593 | 594 | - **sanitycheck**: Add feature to check for cluster good rules 595 | ([`f649380`](https://github.com/enix/pvecontrol/commit/f64938077d41a745c04b306d27754c43a1500c67)) 596 | 597 | - **task**: Add internal decode_log fonction 598 | ([`dad15b2`](https://github.com/enix/pvecontrol/commit/dad15b22e1fdeade956bfadba66283d0aa9ce835)) 599 | 600 | - **tasks**: Rewrite task using class 601 | ([`df528be`](https://github.com/enix/pvecontrol/commit/df528be678de9b6cfd2cf822ffcaedaf7db2c280)) 602 | 603 | - **vm**: Add vm.migrate 604 | ([`9b01237`](https://github.com/enix/pvecontrol/commit/9b012372c038e451b2898fdff9f0a169940300d4)) 605 | 606 | This new fonction take over management of VM migration 607 | 608 | 609 | ## v0.2.0 (2023-10-06) 610 | 611 | ### Bug Fixes 612 | 613 | - **config**: Use a more comprehensive default 614 | ([`5b9d4fc`](https://github.com/enix/pvecontrol/commit/5b9d4fc98ab6461cab30a89d242a440425e81b3c)) 615 | 616 | - **nodelist**: Add some defaults for optional 617 | ([`f1ece32`](https://github.com/enix/pvecontrol/commit/f1ece32b936d1f8d187abd4426fa08de0a0ce6e8)) 618 | 619 | - **pvecontrol**: Convert vmid to int 620 | ([`bb493fa`](https://github.com/enix/pvecontrol/commit/bb493fa4d1dac1f9e8d8e888957e6684e3f706be)) 621 | 622 | - **pvecontrol**: Update parser help output 623 | ([`4a484f3`](https://github.com/enix/pvecontrol/commit/4a484f310c0c23dfababd7e700bf4d3678653fee)) 624 | 625 | - **requirements**: Bump proxmoxer version 626 | ([`09fd1fb`](https://github.com/enix/pvecontrol/commit/09fd1fb06e0c4b1f37e315855f2ae9260bf8b6ce)) 627 | 628 | ### Chores 629 | 630 | - **debug**: Add some debug lines 631 | ([`151c31d`](https://github.com/enix/pvecontrol/commit/151c31df02d6da841debc85a7a3cb1df004c18b0)) 632 | 633 | - **README**: Add badges, fix typos 634 | ([`e51df76`](https://github.com/enix/pvecontrol/commit/e51df76aa0625c0c9e40ed95215c16ce06964175)) 635 | 636 | - **README**: More complete documentation 637 | ([`384749c`](https://github.com/enix/pvecontrol/commit/384749c052b2fa37cd152b175aca517d116b8fc9)) 638 | 639 | - **setup**: Change description 640 | ([`542f9d7`](https://github.com/enix/pvecontrol/commit/542f9d7594fa47384753335e57382505b6bb2b16)) 641 | 642 | - **setup**: Change README content type 643 | ([`08aabcb`](https://github.com/enix/pvecontrol/commit/08aabcbde7cc6630e86029da965ed1fb25d84a91)) 644 | 645 | ### Features 646 | 647 | - **vmmigrate**: Add dry-run 648 | ([`9a5941e`](https://github.com/enix/pvecontrol/commit/9a5941e8639bc0c5010bbaa1911fe6ac368a36d1)) 649 | 650 | - **vmmigrate**: First version with migration 651 | ([`c908672`](https://github.com/enix/pvecontrol/commit/c908672e60ab60029a1b2c71ec9edce5edc41111)) 652 | 653 | 654 | ## v0.1.1 (2023-09-13) 655 | 656 | ### Bug Fixes 657 | 658 | - **pvecontrol**: Add missing empty line 659 | ([`b825a95`](https://github.com/enix/pvecontrol/commit/b825a9516a48c70a950cb8faf4bd9113f981ccbc)) 660 | 661 | ### Chores 662 | 663 | - **README**: Update python version 664 | ([`14b4ad1`](https://github.com/enix/pvecontrol/commit/14b4ad17cc2245db9540915c4fd1cf30acba1a80)) 665 | 666 | - **release**: Use unified release workflow 667 | ([`6c481a7`](https://github.com/enix/pvecontrol/commit/6c481a7f81c755fd685d6111071469955b0660a9)) 668 | 669 | 670 | ## v0.1.0 (2023-09-13) 671 | 672 | ### Features 673 | 674 | - **semantic-release**: Add semantic-release configuration 675 | ([`d1c86b5`](https://github.com/enix/pvecontrol/commit/d1c86b513fc3ab7a6b402b0156cd1b16b8481d4e)) 676 | 677 | This include semantic-release gh action to build a new 
release when push to main branch 678 | 679 | 680 | ## v0.0.1 (2023-09-13) 681 | 682 | ### Bug Fixes 683 | 684 | - **allocated_cpu**: Sockets is optional 685 | ([`a858e9b`](https://github.com/enix/pvecontrol/commit/a858e9b5d00f6dd6c848d27e1a135bb42f23eff4)) 686 | 687 | - **main**: Proper definition of main function 688 | ([`8253449`](https://github.com/enix/pvecontrol/commit/8253449163635a2f32f2e1e74dd76208b2beb853)) 689 | 690 | - **nodelist**: Skip offline nodes 691 | ([`9c67414`](https://github.com/enix/pvecontrol/commit/9c6741488365555f8a578eb029e4fead3dae47e5)) 692 | 693 | - **taskget**: Fix output of running tasks 694 | ([`b720c1b`](https://github.com/enix/pvecontrol/commit/b720c1b30cf71dae66ce0828f84d45d497d4d2cb)) 695 | 696 | ### Chores 697 | 698 | - **package**: Add gh action package 699 | ([`477e243`](https://github.com/enix/pvecontrol/commit/477e243f41abc8cfe9621f601210dcf65d49df97)) 700 | 701 | - **requirements**: Update requirements.txt 702 | ([`8d58ae1`](https://github.com/enix/pvecontrol/commit/8d58ae1fec5dd0810250bde2beedb365fcf09352)) 703 | 704 | - **test**: Add simple test 705 | ([`2a6e14e`](https://github.com/enix/pvecontrol/commit/2a6e14e663023df32e333cdf917228179eba1b40)) 706 | 707 | ### Features 708 | 709 | - **get_nodes**: New fonction 710 | ([`41fc8ae`](https://github.com/enix/pvecontrol/commit/41fc8aefdd7dfe314e4f836a9dbb3013105489c6)) 711 | 712 | - **gettasks**: New function 713 | ([`f479bf4`](https://github.com/enix/pvecontrol/commit/f479bf475506219623ff178eee3e5d8ccce5c1a2)) 714 | 715 | - **packaging**: Package the app 716 | ([`63e3bec`](https://github.com/enix/pvecontrol/commit/63e3bec60619d4db58077265fac4746d90213c24)) 717 | 718 | - **taskget**: Get task details 719 | ([`f2a4b14`](https://github.com/enix/pvecontrol/commit/f2a4b143c145a5a60ccebd8404ab11cc32652158)) 720 | 721 | - **taskget**: Get task informations and logs 722 | ([`85f1e1c`](https://github.com/enix/pvecontrol/commit/85f1e1c38fcdf5dddf47810453c09bcb69adaf2f)) 723 | 724 | - **tasklist**: Rename function 725 | ([`3b449f5`](https://github.com/enix/pvecontrol/commit/3b449f56ce8ca939e17e15f1ee3fcd04c61c2b52)) 726 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Proxmox VE Control 2 | 3 | ![release workflow](https://github.com/enix/pvecontrol/actions/workflows/release.yml/badge.svg?branch=main) 4 | ![PyPI release](https://img.shields.io/pypi/v/pvecontrol.svg) 5 | ![PyPI downloads](https://img.shields.io/pypi/dm/pvecontrol.svg) 6 | 7 | `pvecontrol` is a CLI tool to manage Proxmox VE clusters and perform intermediate and advanced tasks that aren't available (or aren't straightforward) in the Proxmox web UI or default CLI tools. 8 | 9 | It was written by (and for) teams managing multiple Proxmox clusters, sometimes with many hypervisors. Conversely, if your Proxmox install consists of a single cluster with a single node, the features of `pvecontrol` might not be very interesting for you! 10 | 11 | Here are a few examples of things you can do with `pvecontrol` (sketched on the command line just below): 12 | 13 | - List all VMs across all hypervisors, along with their state and size. 14 | - Evacuate (=drain) a hypervisor, i.e. migrate all VMs that are running on that hypervisor, automatically picking nodes with enough capacity to host these VMs. 15 | - Run sanity checks on a cluster. Sanity checks are sets of tests designed to verify the integrity of the cluster.
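For instance, assuming a cluster named `my-cluster` is defined in your configuration, those operations look roughly like this (a sketch only: the exact commands and flags are documented in the Usage section below, and the node names are illustrative):

```shell
# List every VM across all hypervisors of the cluster
pvecontrol --cluster my-cluster vm list

# Drain a hypervisor; --target accepts shell-like globbing,
# and --wait blocks until the migrations are finished
pvecontrol --cluster my-cluster node evacuate node-01 --target 'node-*' --wait

# Run the sanity checks against the cluster
pvecontrol --cluster my-cluster sanitycheck
```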
16 | 17 | To communicate with Proxmox VE, `pvecontrol` uses [proxmoxer](https://pypi.org/project/proxmoxer/), a wonderful library that enables communication with various Proxmox APIs. 18 | 19 | ## Installation 20 | 21 | `pvecontrol` requires Python version 3.9 or above. 22 | 23 | The easiest way to install it is simply using pip. New versions are automatically published to the [PyPI](https://pypi.org/project/pvecontrol/) repository. It is recommended to use `pipx` in order to automatically create a dedicated Python virtual environment: 24 | 25 | ```shell 26 | pipx install pvecontrol 27 | ``` 28 | 29 | ## Configuration 30 | 31 | To use `pvecontrol`, you must create a YAML configuration file at `$HOME/.config/pvecontrol/config.yaml`. That file lists your clusters and how to authenticate with them. 32 | 33 | `pvecontrol` only uses the Proxmox HTTP API, which means that you can use most Proxmox authentication mechanisms, including `@pve` realm users and tokens. 34 | 35 | HTTPS certificate verification is disabled by default, but can be enabled using the `ssl_verify` boolean. 36 | 37 | As an example, here's how to set up a dedicated user for `pvecontrol`, with read-only access to the Proxmox API: 38 | 39 | ```shell 40 | pveum user add pvecontrol@pve --password my.password.is.weak 41 | pveum acl modify / --roles PVEAuditor --users pvecontrol@pve 42 | ``` 43 | 44 | You can then create the following configuration file in `$HOME/.config/pvecontrol/config.yaml`: 45 | 46 | ```yaml 47 | clusters: 48 | - name: fr-par-1 49 | host: localhost 50 | user: pvecontrol@pve 51 | password: my.password.is.weak 52 | ssl_verify: true 53 | ``` 54 | 55 | And see `pvecontrol` in action right away: 56 | 57 | ```shell 58 | pvecontrol -c fr-par-1 vm list 59 | ``` 60 | 61 | If you plan to use `pvecontrol` to move VMs around, you should grant it `PVEVMAdmin` permissions: 62 | 63 | ```shell 64 | pveum acl modify / --roles PVEVMAdmin --users pvecontrol@pve 65 | ``` 66 | 67 | ### API tokens 68 | 69 | `pvecontrol` also supports authentication with API tokens. A Proxmox API token is associated with an individual user, and can be given separate permissions and expiration dates. You can learn more about Proxmox tokens in [this section of the Proxmox documentation](https://pve.proxmox.com/pve-docs/pveum-plain.html#pveum_tokens). 70 | 71 | As an example, to create a new API token associated with the `pvecontrol@pve` user that inherits all its permissions, you can use the following command: 72 | 73 | ```shell 74 | pveum user token add pvecontrol@pve mytoken --privsep 0 75 | ``` 76 | 77 | Then retrieve the token value and add it to the configuration file to authenticate: 78 | 79 | ```yaml 80 | clusters: 81 | - name: fr-par-1 82 | host: localhost 83 | user: pvecontrol@pve 84 | token_name: mytoken 85 | token_value: randomtokenvalue 86 | ``` 87 | 88 | ### Reverse proxies 89 | 90 | `pvecontrol` supports certificate-based authentication to a reverse proxy, which makes it suitable for use with tools like [teleport](https://goteleport.com/docs/enroll-resources/application-access/guides/connecting-apps/) and its application access feature.
91 | 92 | ```yaml 93 | clusters: 94 | - name: fr-par-1 95 | host: localhost 96 | user: pvecontrol@pve 97 | password: my.password.is.weak 98 | proxy_certificate_path: /tmp/proxmox-reverse-proxy.pem 99 | proxy_certificate_key_path: /tmp/proxmox-reverse-proxy 100 | ``` 101 | 102 | You can also use command substitution syntax and the key `proxy_certificate` to execute a command that will output a JSON document containing the certificate and key paths. 103 | 104 | ```yaml 105 | clusters: 106 | - name: fr-par-1 107 | host: localhost 108 | user: pvecontrol@pve 109 | password: my.password.is.weak 110 | proxy_certificate: $(my_custom_command login proxmox-fr-par-1) 111 | ``` 112 | 113 | It should output something like this: 114 | 115 | ```json 116 | { 117 | "cert": "/tmp/proxmox-reverse-proxy.pem", 118 | "key": "/tmp/proxmox-reverse-proxy", 119 | "anything_else": "it is ok to have other fields, they will be ignored. this is to support existing commands" 120 | } 121 | ``` 122 | 123 | CAUTION: environment variable and `~` expansion are not supported. 124 | 125 | ### Better security 126 | 127 | Instead of specifying users, passwords, and certificate paths in plain text in the configuration file, you can use the shell command substitution syntax `$(...)` inside the `user`, `password`, `proxy_certificate` fields; for instance: 128 | 129 | ```yaml 130 | clusters: 131 | - name: prod-cluster-1 132 | host: 10.10.10.10 133 | user: pvecontrol@pve 134 | ssl_verify: true 135 | password: $(command to get -password) 136 | ``` 137 | 138 | ### Worse security 139 | 140 | You _can_ use `@pam` users (and even `root@pam`) and passwords in the `pvecontrol` YAML configuration file; but you probably _should not_, as anyone with read access to the configuration file would then automatically gain shell access to your Proxmox hypervisor. _Not recommended in production!_ 141 | 142 | ### Advanced configuration options 143 | 144 | The configuration file can include a `node:` section to specify CPU and memory policies. These will be used when scheduling a VM (i.e. determining on which node it should run), specifically when draining a node for maintenance. 145 | 146 | There are currently two parameters: `cpufactor` and `memoryminimum`. 147 | 148 | `cpufactor` indicates the level of overcommit allowed on a hypervisor. `1` means no overcommit at all; `5` means "a hypervisor with 8 cores can run VMs with up to 5x8 = 40 cores in total". 149 | 150 | `memoryminimum` is the amount of memory that should always be available on a node, in bytes. When scheduling a VM (for instance, when automatically moving VMs around), `pvecontrol` will make sure that this amount of memory remains available for the hypervisor OS itself. Caution: if that amount is set to zero, it will be possible to allocate the entire host memory to virtual machines, leaving no memory for the hypervisor operating system and management daemons! 151 | 152 | These options can be specified in a global `node:` section, and then overridden per cluster.
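To make the placement rule concrete, here is a minimal Python sketch of the check these two parameters imply (an illustration only, not `pvecontrol`'s actual scheduler code; the `node` field names are made up for the example):

```python
GIB = 2**30

def node_can_host(vm_cores, vm_mem, node, cpufactor=2.5, memoryminimum=8 * GIB):
    """Return True if `node` may receive the VM under both policies."""
    # cpufactor caps the total vCPUs allocated on the node at
    # cpufactor * physical cores
    cpu_ok = node["allocated_cpu"] + vm_cores <= cpufactor * node["cores"]
    # memoryminimum bytes must stay available for the hypervisor OS itself
    mem_ok = node["memory"] - (node["allocated_mem"] + vm_mem) >= memoryminimum
    return cpu_ok and mem_ok

# An 8-core, 64 GiB node already hosting 16 vCPUs and 40 GiB of VMs:
node = {"cores": 8, "allocated_cpu": 16, "memory": 64 * GIB, "allocated_mem": 40 * GIB}
# 16 + 4 = 20 vCPUs <= 2.5 * 8 = 20, and 64 - (40 + 8) = 16 GiB >= 8 GiB
print(node_can_host(4, 8 * GIB, node))  # True
```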
153 | 154 | Here is a configuration file showing this in action: 155 | 156 | ```yaml 157 | --- 158 | node: 159 | # Overcommit CPU factor 160 | # 1 = no overcommit 161 | cpufactor: 2.5 162 | # Memory to reserve for the system on a node (in bytes) 163 | memoryminimum: 8589934592 164 | clusters: 165 | - name: my-test-cluster 166 | host: 192.168.1.10 167 | user: pvecontrol@pve 168 | password: superpasssecret 169 | # Override global values for this cluster 170 | node: 171 | cpufactor: 1 172 | - name: prod-cluster-1 173 | host: 10.10.10.10 174 | user: pvecontrol@pve 175 | password: Supers3cUre 176 | - name: prod-cluster-2 177 | host: 10.10.10.10 178 | user: $(command to get -user) 179 | password: $(command to get -password) 180 | - name: prod-cluster-3 181 | host: 10.10.10.10 182 | user: morticia@pve 183 | token_name: pvecontrol 184 | token_value: 12345678-abcd-abcd-abcd-1234567890ab 185 | 186 | ``` 187 | 188 | ## Usage 189 | 190 | Here is a quick overview of `pvecontrol` commands and options; it may evolve over time: 191 | 192 | ```shell 193 | $ pvecontrol --help 194 | Usage: pvecontrol [OPTIONS] COMMAND [ARGS]... 195 | 196 | Proxmox VE control CLI, version: x.y.z 197 | 198 | Options: 199 | -d, --debug 200 | -o, --output [text|json|csv|yaml|md] 201 | [default: text] 202 | -c, --cluster NAME Proxmox cluster name as defined in 203 | configuration [required] 204 | --unicode / --no-unicode Use unicode characters for output 205 | --color / --no-color Use colorized output 206 | --help Show this message and exit. 207 | 208 | Commands: 209 | node evacuate Evacuate a node by migrating all its VMs out to one or... 210 | node list List nodes in the cluster 211 | sanitycheck Check status of Proxmox cluster 212 | status Show cluster status 213 | storage list List storages in the cluster 214 | task get Get detailed information about a task 215 | task list List tasks in the cluster 216 | vm list List VMs in the cluster 217 | vm migrate Migrate VMs in the cluster 218 | 219 | Made with love by Enix.io 220 | ``` 221 | 222 | `pvecontrol` works with subcommands for each operation. Operations related to a specific kind of object (tasks, for instance) are grouped into their own subcommand group. Each subcommand has its own help: 223 | 224 | ```shell 225 | $ pvecontrol task get --help 226 | Usage: pvecontrol task get [OPTIONS] UPID 227 | 228 | Options: 229 | -f, --follow Follow task log output 230 | -w, --wait Wait task end 231 | --help Show this message and exit. 232 | ``` 233 | 234 | The `-c` or `--cluster` flag is required in order to indicate which cluster to work on. 235 | 236 | The simplest operation we can do to check that `pvecontrol` works correctly, and that authentication has been configured properly, is `status`: 237 | 238 | ```shell 239 | $ pvecontrol --cluster my-test-cluster status 240 | INFO:root:Proxmox cluster: my-test-cluster 241 | 242 | Status: healthy 243 | VMs: 0 244 | Templates: 0 245 | Metrics: 246 | CPU: 0.00/64(0.0%), allocated: 0 247 | Memory: 0.00 GiB/128.00 GiB(0.0%), allocated: 0.00 GiB 248 | Disk: 0.00 GiB/2.66 TiB(0.0%) 249 | Nodes: 250 | Offline: 0 251 | Online: 3 252 | Unknown: 0 253 | ``` 254 | 255 | If this works, we're good to go! 256 | 257 | ## Environment variables 258 | 259 | `pvecontrol` supports the following environment variables: 260 | - `PVECONTROL_CLUSTER`: the default cluster to use when no `-c` or `--cluster` option is specified. 261 | - `PVECONTROL_COLOR`: if set to `False`, it will disable all colorized output.
262 | - `PVECONTROL_UNICODE`: if set to `False`, it will disable all unicode output. 263 | 264 | 265 | ## Shell completion 266 | 267 | `pvecontrol` provides a completion helper to generate completion configuration for common shells. It currently supports `bash`, `zsh`, and `fish`. 268 | 269 | You can adapt the following commands to your environment: 270 | 271 | ```shell 272 | # bash 273 | _PVECONTROL_COMPLETE=bash_source pvecontrol > "${BASH_COMPLETION_USER_DIR:-${XDG_DATA_HOME:-$HOME/.local/share}/bash-completion}/completions/pvecontrol" 274 | # zsh 275 | _PVECONTROL_COMPLETE=zsh_source pvecontrol > "${HOME}/.zsh/completions/_pvecontrol" 276 | # fish 277 | _PVECONTROL_COMPLETE=fish_source pvecontrol > {$HOME}/.config/fish/completions/pvecontrol.fish 278 | ``` 279 | 280 | ## Development 281 | 282 | If you want to tinker with the code, all the required dependencies are listed in `requirements.txt`, and you can install them e.g. with pip: 283 | 284 | ```shell 285 | pip3 install -r requirements.txt -e . 286 | ``` 287 | 288 | Then you can run the script directly like so: 289 | 290 | ```shell 291 | pvecontrol -h 292 | ``` 293 | 294 | ## Contributing 295 | 296 | This project uses _semantic versioning_ with the [python-semantic-release](https://python-semantic-release.readthedocs.io/en/latest/) toolkit to automate the release process. All commits must follow the [Angular Commit Message Conventions](https://github.com/angular/angular/blob/master/CONTRIBUTING.md#-commit-message-format). The repository's `main` branch is also protected to prevent accidental releases. All updates must go through a PR with a review. 297 | 298 | --- 299 | 300 | Made with :heart: by Enix :monkey: from Paris :fr:. 301 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "setuptools.build_meta" 3 | requires = ["setuptools", "wheel"] 4 | 5 | [tool.semantic_release] 6 | version_variables = ["setup.py:__version__"] 7 | assets = [] 8 | commit_message = "{version}\n\nAutomatically generated by python-semantic-release" 9 | commit_parser = "angular" 10 | logging_use_named_masks = false 11 | major_on_zero = true 12 | tag_format = "v{version}" 13 | 14 | [tool.semantic_release.branches.main] 15 | match = "(main|master)" 16 | prerelease_token = "rc" 17 | prerelease = false 18 | 19 | [tool.semantic_release.changelog] 20 | template_dir = "templates" 21 | changelog_file = "CHANGELOG.md" 22 | exclude_commit_patterns = [] 23 | 24 | [tool.semantic_release.changelog.environment] 25 | block_start_string = "{%" 26 | block_end_string = "%}" 27 | variable_start_string = "{{" 28 | variable_end_string = "}}" 29 | comment_start_string = "{#" 30 | comment_end_string = "#}" 31 | trim_blocks = false 32 | lstrip_blocks = false 33 | newline_sequence = "\n" 34 | keep_trailing_newline = false 35 | extensions = [] 36 | autoescape = true 37 | 38 | [tool.semantic_release.commit_author] 39 | env = "GIT_COMMIT_AUTHOR" 40 | default = "Monkeynator " 41 | 42 | [tool.semantic_release.commit_parser_options] 43 | allowed_tags = ["build", "chore", "ci", "docs", "feat", "fix", "perf", "style", "refactor", "test"] 44 | minor_tags = ["feat"] 45 | patch_tags = ["fix", "perf"] 46 | 47 | [tool.semantic_release.remote] 48 | name = "origin" 49 | type = "github" 50 | ignore_token_for_push = false 51 | 52 | [tool.semantic_release.remote.token] 53 | env = "GH_TOKEN" 54 | 55 | [tool.semantic_release.publish] 56 |
dist_glob_patterns = ["dist/*"] 57 | upload_to_vcs_release = true 58 | 59 | [tool.black] 60 | line-length = 120 61 | exclude = ''' 62 | /( 63 | .git 64 | .tox 65 | .nox 66 | .venv 67 | build 68 | dist 69 | )/ 70 | ''' 71 | 72 | [tool.pylint.'MESSAGES CONTROL'] 73 | max-line-length = 150 74 | disable = ''' 75 | no-member, 76 | missing-module-docstring, 77 | missing-function-docstring, 78 | too-many-instance-attributes, 79 | missing-class-docstring, 80 | too-few-public-methods, 81 | fixme, 82 | too-many-arguments, 83 | too-many-positional-arguments 84 | ''' 85 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | black 2 | pylint 3 | pytest 4 | responses 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | prettytable 2 | humanize 3 | confuse 4 | pyyaml 5 | urllib3 6 | shtab 7 | click 8 | 9 | # proxmoxer depends on 10 | proxmoxer>=2.0.1 11 | requests 12 | requests_toolbelt 13 | openssh_wrapper 14 | paramiko 15 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = pvecontrol 3 | version = attr: setup.py:__version__ 4 | author = Laurent Corbes 5 | author_email = laurent.corbes@enix.fr 6 | description = Proxmox VE control 7 | long_description = file: README.md 8 | long_description_content_type = text/markdown 9 | license = GPLv3 10 | 11 | [options] 12 | python_requires= >=3.9 13 | package_dir= 14 | =src 15 | packages=find: 16 | include_package_data = True 17 | install_requires = file: requirements.txt 18 | 19 | [options.packages.find] 20 | where=src 21 | 22 | [options.package_data] 23 | * = *.yaml 24 | 25 | [options.entry_points] 26 | console_scripts = 27 | pvecontrol = pvecontrol:main 28 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | __version__ = "0.6.0" 4 | 5 | setup(version=__version__) 6 | -------------------------------------------------------------------------------- /src/pvecontrol/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import logging 5 | import signal 6 | 7 | from types import SimpleNamespace 8 | from importlib.metadata import version 9 | 10 | import click 11 | import urllib3 12 | 13 | from pvecontrol import actions 14 | from pvecontrol.utils import OutputFormats 15 | 16 | 17 | def get_leaf_command(cmd, ctx, args): 18 | if len(args) == 0: 19 | return cmd, [] 20 | 21 | # remove options from args 22 | parser = cmd.make_parser(ctx) 23 | _, args_without_options, _ = parser.parse_args(list(args)) 24 | 25 | if len(args_without_options) == 0: 26 | return cmd, args 27 | 28 | # resolve sub command 29 | name, sub_cmd, sub_args = cmd.resolve_command(ctx, args_without_options) 30 | if isinstance(sub_cmd, click.MultiCommand) and len(sub_args) > 0: 31 | sub_ctx = sub_cmd.make_context(name, sub_args, parent=ctx) 32 | return get_leaf_command(sub_cmd, sub_ctx, sub_args) 33 | 34 | return sub_cmd, sub_args 35 | 36 | 37 | # Patch click to ignore required parameters when --help is passed 38 | class
IgnoreRequiredForHelp(click.Group): 39 | def __init__(self, *args, **kwargs): 40 | super().__init__(*args, **kwargs) 41 | self.ignoring = False 42 | 43 | def _is_defaulting_to_help(self, ctx, args): 44 | try: 45 | leaf_cmd, leaf_args = get_leaf_command(self, ctx, args) 46 | 47 | # keep the default behavior when no subcommand is passed or when the subcommand doesn't exist 48 | if leaf_cmd is None or leaf_cmd is self: 49 | return False 50 | 51 | return ( 52 | "--help" in leaf_args 53 | or (isinstance(leaf_cmd, click.MultiCommand) and not leaf_cmd.invoke_without_command) 54 | or (leaf_cmd.no_args_is_help and len(leaf_args) == 0) 55 | ) 56 | except click.exceptions.UsageError: 57 | return False 58 | 59 | def parse_args(self, ctx, args): 60 | if self._is_defaulting_to_help(ctx, args): 61 | self.ignoring = True 62 | for param in self.params: 63 | param.required = False 64 | 65 | return super().parse_args(ctx, args) 66 | 67 | def format_commands(self, ctx, formatter) -> None: 68 | commands = [] 69 | for subcommand in self.list_commands(ctx): 70 | cmd = self.get_command(ctx, subcommand) 71 | if cmd is None or cmd.hidden: 72 | continue 73 | 74 | commands.append((subcommand, cmd)) 75 | 76 | if len(commands) > 0: 77 | limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) 78 | 79 | rows = [] 80 | for subcommand, cmd in commands: 81 | if not isinstance(cmd, click.MultiCommand): 82 | cmd_help = cmd.get_short_help_str(limit) 83 | rows.append((subcommand, cmd_help)) 84 | continue 85 | for subsubcommand in cmd.list_commands(ctx): 86 | subcmd = cmd.get_command(ctx, subsubcommand) 87 | if subcmd is None or subcmd.hidden: 88 | continue 89 | cmd_help = subcmd.get_short_help_str(limit) 90 | rows.append((f"{subcommand} {subsubcommand}", cmd_help)) 91 | 92 | if rows: 93 | with formatter.section("Commands"): 94 | formatter.write_dl(rows) 95 | 96 | 97 | @click.group( 98 | cls=IgnoreRequiredForHelp, 99 | help=f"Proxmox VE control CLI, version: {version(__name__)}", 100 | epilog="Made with love by Enix.io", 101 | ) 102 | @click.option("-d", "--debug", is_flag=True) 103 | @click.option( 104 | "-o", 105 | "--output", 106 | type=click.Choice([o.value for o in OutputFormats]), 107 | show_default=True, 108 | default=OutputFormats.TEXT.value, 109 | callback=lambda *v: OutputFormats(v[2]), 110 | ) 111 | @click.option( 112 | "-c", 113 | "--cluster", 114 | metavar="NAME", 115 | envvar="CLUSTER", 116 | required=True, 117 | help="Proxmox cluster name as defined in configuration", 118 | ) 119 | @click.option( 120 | "--unicode/--no-unicode", 121 | envvar="UNICODE", 122 | default=True, 123 | help="Use unicode characters for output", 124 | ) 125 | @click.option( 126 | "--color/--no-color", 127 | envvar="COLOR", 128 | default=True, 129 | help="Use colorized output", 130 | ) 131 | @click.pass_context 132 | def pvecontrol(ctx, debug, output, cluster, unicode, color): 133 | signal.signal(signal.SIGINT, lambda *_: sys.exit(130)) 134 | # Disable urllib3 warnings about invalid certs 135 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 136 | 137 | if not ctx.command.ignoring: 138 | # get cli arguments 139 | args = SimpleNamespace(output=output, cluster=cluster, unicode=unicode, color=color) 140 | 141 | # configure logging 142 | logging.basicConfig(encoding="utf-8", level=logging.DEBUG if debug else logging.INFO) 143 | logging.debug("Arguments: %s", args) 144 | 145 | ctx.ensure_object(dict) 146 | ctx.obj["args"] = args 147 | 148 | 149 | pvecontrol.add_command(cmd=actions.cluster.status, name="status") 150
| pvecontrol.add_command(cmd=actions.cluster.sanitycheck, name="sanitycheck") 151 | pvecontrol.add_command(cmd=actions.node.root, name="node") 152 | pvecontrol.add_command(cmd=actions.storage.root, name="storage") 153 | pvecontrol.add_command(cmd=actions.task.root, name="task") 154 | pvecontrol.add_command(cmd=actions.vm.root, name="vm") 155 | 156 | 157 | def main(): 158 | # pylint: disable=no-value-for-parameter 159 | pvecontrol(auto_envvar_prefix="PVECONTROL") 160 | 161 | 162 | if __name__ == "__main__": 163 | sys.exit(main()) 164 | -------------------------------------------------------------------------------- /src/pvecontrol/__main__.py: -------------------------------------------------------------------------------- 1 | import pvecontrol 2 | 3 | if __name__ == "__main__": 4 | pvecontrol.main() 5 | -------------------------------------------------------------------------------- /src/pvecontrol/actions/__init__.py: -------------------------------------------------------------------------------- 1 | from . import cluster, node, storage, task, vm 2 | -------------------------------------------------------------------------------- /src/pvecontrol/actions/cluster.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import click 4 | 5 | from humanize import naturalsize 6 | 7 | from pvecontrol.models.node import NodeStatus 8 | from pvecontrol.sanitycheck import SanityCheck 9 | from pvecontrol.sanitycheck.tests import DEFAULT_CHECK_IDS 10 | from pvecontrol.models.cluster import PVECluster 11 | from pvecontrol.utils import OutputFormats, render_output 12 | 13 | 14 | @click.command() 15 | @click.pass_context 16 | def status(ctx): 17 | """Show cluster status""" 18 | proxmox = PVECluster.create_from_config(ctx.obj["args"].cluster) 19 | cluster_status = "healthy" if proxmox.is_healthy else "not healthy" 20 | 21 | templates = sum(len(node.templates) for node in proxmox.nodes) 22 | vms = sum(len(node.vms) for node in proxmox.nodes) 23 | metrics = proxmox.metrics 24 | 25 | def _get_cpu_output(): 26 | c_usage = metrics["cpu"]["usage"] 27 | c_total = metrics["cpu"]["total"] 28 | c_percent = metrics["cpu"]["percent"] 29 | c_allocated = metrics["cpu"]["allocated"] 30 | return f"{c_usage:.2f}/{c_total} ({c_percent:.1f}%), allocated: {c_allocated}" 31 | 32 | def _get_memory_output(): 33 | m_usage = naturalsize(metrics["memory"]["usage"], binary=True, format="%.2f") 34 | m_total = naturalsize(metrics["memory"]["total"], binary=True, format="%.2f") 35 | m_percent = metrics["memory"]["percent"] 36 | m_allocated = naturalsize(metrics["memory"]["allocated"], binary=True, format="%.2f") 37 | return f"{m_usage}/{m_total}({m_percent:.1f}%), allocated: {m_allocated}" 38 | 39 | def _get_disk_output(): 40 | d_usage = naturalsize(metrics["disk"]["usage"], binary=True, format="%.2f") 41 | d_total = naturalsize(metrics["disk"]["total"], binary=True, format="%.2f") 42 | d_percent = metrics["disk"]["percent"] 43 | return f"{d_usage}/{d_total}({d_percent:.1f}%)" 44 | 45 | if ctx.obj["args"].output == OutputFormats.TEXT: 46 | print( 47 | f"""\n\ 48 | Status: {cluster_status} 49 | VMs: {vms - templates} 50 | Templates: {templates} 51 | Metrics: 52 | CPU: {_get_cpu_output()} 53 | Memory: {_get_memory_output()} 54 | Disk: {_get_disk_output()} 55 | Nodes: 56 | Offline: {len([node for node in proxmox.nodes if node.status == NodeStatus.OFFLINE])} 57 | Online: {len([node for node in proxmox.nodes if node.status == NodeStatus.ONLINE])} 58 | Unknown: {len([node for node in 
proxmox.nodes if node.status == NodeStatus.UNKNOWN])} 59 | """ 60 | ) 61 | else: 62 | render_table = [ 63 | { 64 | "status": cluster_status, 65 | "vm": vms - templates, 66 | "templates": templates, 67 | "metrics": metrics, 68 | "nodes": { 69 | "offline": len([node for node in proxmox.nodes if node.status == NodeStatus.OFFLINE]), 70 | "online": len([node for node in proxmox.nodes if node.status == NodeStatus.ONLINE]), 71 | "unknown": len([node for node in proxmox.nodes if node.status == NodeStatus.UNKNOWN]), 72 | }, 73 | } 74 | ] 75 | print(render_output(render_table, output=ctx.obj["args"].output)) 76 | 77 | 78 | @click.command() 79 | @click.argument("checks", nargs=-1, type=click.Choice(list(DEFAULT_CHECK_IDS), case_sensitive=False)) 80 | @click.pass_context 81 | def sanitycheck(ctx, checks): 82 | """Run sanity checks on the Proxmox cluster""" 83 | # More checks to implement 84 | # VM is started but 'startonboot' not set 85 | # VM is running in cpu = host 86 | # VM is running in cpu = qemu64 87 | proxmox = PVECluster.create_from_config(ctx.obj["args"].cluster) 88 | sc = SanityCheck(proxmox, ctx.obj["args"].color, ctx.obj["args"].unicode) 89 | exitcode = sc.run(checks=set(checks)) 90 | sc.display() 91 | sys.exit(exitcode) 92 | -------------------------------------------------------------------------------- /src/pvecontrol/actions/node.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import click 4 | 5 | from pvecontrol.models.node import NodeStatus 6 | from pvecontrol.models.vm import VmStatus 7 | from pvecontrol.utils import print_task 8 | from pvecontrol.cli import ResourceGroup, migration_related_command 9 | from pvecontrol.models.node import COLUMNS 10 | from pvecontrol.models.cluster import PVECluster 11 | 12 | 13 | @click.group( 14 | cls=ResourceGroup, 15 | name="node", 16 | columns=COLUMNS, 17 | default_sort="node", 18 | list_callback=lambda proxmox: proxmox.nodes, 19 | ) 20 | def root(): 21 | pass 22 | 23 | 24 | @root.command() 25 | @click.argument("node", required=True) 26 | @click.argument("target", nargs=-1) 27 | @migration_related_command 28 | @click.option("--no-skip-stopped", is_flag=True, help="Don't skip VMs that are stopped") 29 | @click.pass_context 30 | # FIXME: remove pylint disable annotations 31 | # pylint: disable=too-many-branches,too-many-statements,too-many-locals 32 | def evacuate(ctx, node, target, dry_run, online, follow, wait, no_skip_stopped): 33 | """Evacuate a node by migrating all its VMs out to one or multiple target nodes""" 34 | # check node exists 35 | proxmox = PVECluster.create_from_config(ctx.obj["args"].cluster) 36 | srcnode = proxmox.find_node(node) 37 | logging.debug(srcnode) 38 | if not srcnode: 39 | print(f"Node {node} does not exist") 40 | return 41 | # check node is online 42 | if srcnode.status != NodeStatus.ONLINE: 43 | print(f"Node {node} is not online") 44 | return 45 | 46 | targets = [] 47 | # compute possible migration targets 48 | if target: 49 | for pattern in list(set(target)): 50 | nodes = proxmox.find_nodes(pattern) 51 | if not nodes: 52 | print(f"No node matches the pattern {pattern}, skipping") 53 | continue 54 | # FIXME: remove pylint disable annotation 55 | # pylint: disable=redefined-argument-from-local 56 | for node in nodes: 57 | if node.node == srcnode.node: 58 | print(f"Target node {node.node} is the same as source node, skipping") 59 | continue 60 | if node.status != NodeStatus.ONLINE: 61 | print(f"Target node {node.node} is not online, skipping") 62 | continue 63 |
targets.append(node) 64 | else: 65 | targets = [n for n in proxmox.nodes if n.status == NodeStatus.ONLINE and n.node != srcnode.node] 66 | if len(targets) == 0: 67 | print("No target node available") 68 | return 69 | # Make sure there are no duplicates in targets 70 | targets = list(set(targets)) 71 | logging.debug("Migration targets: %s", ([t.node for t in targets])) 72 | 73 | plan = [] 74 | for vm in srcnode.vms: 75 | logging.debug("Selecting node for VM: %i, maxmem: %i, cpus: %i", vm.vmid, vm.maxmem, vm.cpus) 76 | if vm.status != VmStatus.RUNNING and not no_skip_stopped: 77 | logging.debug("VM %i is not running, skipping", vm.vmid) 78 | continue 79 | # check resources 80 | # FIXME: remove pylint disable annotation 81 | # pylint: disable=redefined-argument-from-local 82 | for target in targets: 83 | logging.debug( 84 | "Test target: %s, allocatedmem: %i, allocatedcpu: %i", 85 | target.node, 86 | target.allocatedmem, 87 | target.allocatedcpu, 88 | ) 89 | if (vm.maxmem + target.allocatedmem) > (target.maxmem - proxmox.config["node"]["memoryminimum"]): 90 | logging.debug("Discard target: %s, will overcommit ram", target.node) 91 | elif (vm.cpus + target.allocatedcpu) > (target.maxcpu * proxmox.config["node"]["cpufactor"]): 92 | logging.debug("Discard target: %s, will overcommit cpu", target.node) 93 | else: 94 | plan.append( 95 | { 96 | "vmid": vm.vmid, 97 | "vm": vm, 98 | "node": srcnode, 99 | "target": target, 100 | } 101 | ) 102 | target.allocatedmem += vm.maxmem 103 | target.allocatedcpu += vm.cpus 104 | logging.debug( 105 | "Selected target %s: new allocatedmem %i, new allocatedcpu %i", 106 | target.node, 107 | target.allocatedmem, 108 | target.allocatedcpu, 109 | ) 110 | break 111 | else: 112 | print(f"No target found for VM {vm.vmid} ({vm.name}), skipping") 113 | 114 | logging.debug(plan) 115 | # validate input 116 | if len(plan) == 0: 117 | print("No VM to migrate") 118 | return 119 | for p in plan: 120 | print(f"Migrating VM {p['vmid']} ({p['vm'].name}) from {p['node'].node} to {p['target'].node}") 121 | confirmation = input("Confirm (yes):") 122 | logging.debug("Confirmation input: %s", confirmation) 123 | if confirmation.lower() != "yes": 124 | print("Aborting") 125 | return 126 | # run migrations 127 | 128 | for p in plan: 129 | print(f"Migrate VM: {p['vmid']} / {p['vm'].name} from {p['node'].node} to {p['target'].node}") 130 | if not dry_run: 131 | upid = p["vm"].migrate(p["target"].node, online) 132 | logging.debug("Migration UPID: %s", upid) 133 | proxmox.refresh() 134 | _task = proxmox.find_task(upid) 135 | print_task(proxmox, upid, follow, wait) 136 | else: 137 | print("Dry run, skipping migration") 138 | -------------------------------------------------------------------------------- /src/pvecontrol/actions/storage.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | from pvecontrol.models.storage import PVEStorage, COLUMNS 4 | from pvecontrol.cli import ResourceGroup 5 | 6 | 7 | @click.group( 8 | cls=ResourceGroup, 9 | name="storage", 10 | columns=COLUMNS, 11 | default_sort="storage", 12 | list_callback=PVEStorage.get_flattened_grouped_list, 13 | ) 14 | def root(): 15 | pass 16 | -------------------------------------------------------------------------------- /src/pvecontrol/actions/task.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | from pvecontrol.utils import print_task 4 | from pvecontrol.cli import ResourceGroup, task_related_command 5 | from
pvecontrol.models.task import COLUMNS 6 | from pvecontrol.models.cluster import PVECluster 7 | 8 | 9 | @click.group( 10 | cls=ResourceGroup, 11 | name="task", 12 | columns=COLUMNS, 13 | default_sort="starttime", 14 | list_callback=lambda proxmox: proxmox.tasks, 15 | ) 16 | def root(): 17 | pass 18 | 19 | 20 | @root.command() 21 | @click.argument("upid") 22 | @task_related_command 23 | @click.pass_context 24 | def get(ctx, upid, follow, wait): 25 | """Get detailed information about a task""" 26 | proxmox = PVECluster.create_from_config(ctx.obj["args"].cluster) 27 | print_task(proxmox, upid, follow, wait) 28 | -------------------------------------------------------------------------------- /src/pvecontrol/actions/vm.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | import click 5 | 6 | from pvecontrol.utils import print_task 7 | from pvecontrol.cli import ResourceGroup, migration_related_command 8 | from pvecontrol.models.vm import COLUMNS 9 | from pvecontrol.models.cluster import PVECluster 10 | 11 | 12 | @click.group( 13 | cls=ResourceGroup, 14 | name="VM", 15 | columns=COLUMNS, 16 | default_sort="vmid", 17 | list_callback=lambda proxmox: proxmox.vms, 18 | ) 19 | def root(): 20 | pass 21 | 22 | 23 | @root.command() 24 | @click.argument("vmid", type=int) 25 | @click.option( 26 | "-t", 27 | "--target", 28 | metavar="NODEID", 29 | required=True, 30 | help="ID of the target node", 31 | ) 32 | @migration_related_command 33 | @click.pass_context 34 | def migrate(ctx, vmid, target, online, follow, wait, dry_run): 35 | """Migrate a VM to another node of the cluster""" 36 | 37 | proxmox = PVECluster.create_from_config(ctx.obj["args"].cluster) 38 | logging.debug("ARGS: %s", ctx.obj["args"]) 39 | # Migrate a vm to a node 40 | vmid = int(vmid) 41 | target = str(target) 42 | 43 | # Check that vmid exists 44 | vm = _get_vm(proxmox, vmid) 45 | logging.debug("Source vm: %s", vm) 46 | if not vm: 47 | print("Source vm not found") 48 | sys.exit(1) 49 | # Get source node 50 | node = proxmox.find_node(vm.node) 51 | if not node: 52 | print("Source node does not exist") 53 | sys.exit(1) 54 | logging.debug("Source node: %s", node) 55 | 56 | # Check target node exists 57 | target = proxmox.find_node(target) 58 | if not target: 59 | print("Target node does not exist") 60 | sys.exit(1) 61 | # Check the target node has enough resources 62 | # FIXME 63 | 64 | # Check that the migration is possible 65 | check = proxmox.api.nodes(node.node).qemu(vmid).migrate.get(node=node.node, target=target.node) 66 | logging.debug("Migration check: %s", check) 67 | options = {} 68 | options["node"] = node.node 69 | options["target"] = target.node 70 | options["online"] = int(online) 71 | if len(check["local_disks"]) > 0: 72 | options["with-local-disks"] = int(True) 73 | 74 | if not dry_run: 75 | # Start the migration task 76 | upid = proxmox.api.nodes(node.node).qemu(vmid).migrate.post(**options) 77 | # Follow the created task 78 | # pylint: disable=duplicate-code 79 | proxmox.refresh() 80 | _task = proxmox.find_task(upid) 81 | print_task(proxmox, upid, follow, wait) 82 | else: 83 | print("Dry run, skipping migration") 84 | 85 | 86 | def _get_vm(proxmox, vmid): 87 | for v in proxmox.vms: 88 | logging.debug("_get_vm: %s", v) 89 | if v.vmid == vmid: 90 | return v 91 | return None 92 | -------------------------------------------------------------------------------- /src/pvecontrol/cli.py: -------------------------------------------------------------------------------- 1 | import re 2 |
import logging 3 | import click 4 | 5 | from pvecontrol.utils import print_output 6 | from pvecontrol.models.cluster import PVECluster 7 | 8 | 9 | def _make_filter_type_generator(columns): 10 | def _regexp_type(value): 11 | try: 12 | return re.compile(value) 13 | except re.error as e: 14 | raise click.BadParameter(f"invalid regular expression: '{value}' ({e})") from e 15 | 16 | def _column_type(value): 17 | if value not in columns: 18 | choices = ", ".join([f"'{c}'" for c in columns]) 19 | raise click.BadParameter(f"'{value}' is not one of {choices}.") 20 | return value 21 | 22 | while True: 23 | yield _column_type 24 | yield _regexp_type 25 | 26 | 27 | def with_table_options(columns, default_sort): 28 | filter_type_generator = _make_filter_type_generator(columns) 29 | 30 | def filter_type(x): 31 | return next(filter_type_generator)(x) 32 | 33 | def check_cols(cols): 34 | res = [] 35 | for col in cols.split(","): 36 | if col in columns: 37 | res.append(col) 38 | else: 39 | logging.warning("Column (%s) doesn't exist, will be ignored...", col) 40 | return res 41 | 42 | def _add_options(func): 43 | func = click.option( 44 | "--columns", 45 | type=str, 46 | metavar="COLUMNS", 47 | default=",".join(columns), 48 | help="Comma-separated list of columns", 49 | callback=lambda *v: check_cols(v[2]), 50 | )(func) 51 | func = click.option( 52 | "--filter", 53 | "filters", 54 | type=filter_type, 55 | nargs=2, 56 | metavar="COLUMN REGEXP", 57 | multiple=True, 58 | help="Regex to filter items (can be set multiple times)", 59 | callback=lambda *v: v[2], 60 | )(func) 61 | func = click.option( 62 | "--sort-by", 63 | type=click.Choice(columns), 64 | default=default_sort, 65 | show_default=True, 66 | help="Key used to sort items", 67 | )(func) 68 | return func 69 | 70 | return _add_options 71 | 72 | 73 | def task_related_command(func): 74 | func = click.option("-w", "--wait", is_flag=True, help="Wait for the task to end")(func) 75 | func = click.option("-f", "--follow", is_flag=True, help="Follow the task log output")(func) 76 | return func 77 | 78 | 79 | def migration_related_command(func): 80 | func = click.option("--dry-run", is_flag=True, help="Dry run, do not execute migration for real")(func) 81 | func = click.option("--online", is_flag=True, default=False, help="Perform an online migration")(func) 82 | func = task_related_command(func) 83 | return func 84 | 85 | 86 | class ResourceGroup(click.Group): 87 | def __init__(self, name, columns, default_sort, list_callback, *args, **kwargs): 88 | kwargs["help"] = f"{name[0].upper()+name[1:]} related commands" 89 | super().__init__(*args, **kwargs) 90 | add_list_resource_command(name, self, columns, default_sort, list_callback) 91 | 92 | 93 | def add_list_resource_command(resource_name, root_cmd, default_columns, default_sort, list_callback): 94 | @root_cmd.command("list", help=f"List {resource_name}s in the cluster") 95 | @with_table_options(default_columns, default_sort) 96 | @click.pass_context 97 | def _(ctx, sort_by, columns, filters): 98 | proxmox = PVECluster.create_from_config(ctx.obj["args"].cluster) 99 | output = ctx.obj["args"].output 100 | 101 | data = list_callback(proxmox) 102 | data = [ 103 | dict((k, item.__dict__[k] if hasattr(item, "__dict__") else item[k]) for k in default_columns) 104 | for item in data 105 | ] 106 | 107 | print_output(data, columns=columns, sortby=sort_by, filters=filters, output=output) 108 | -------------------------------------------------------------------------------- /src/pvecontrol/config.py:
-------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | import confuse 4 | 5 | 6 | configtemplate = { 7 | "clusters": confuse.Sequence( 8 | { 9 | "name": str, 10 | "host": str, 11 | "user": str, 12 | "password": confuse.Optional(str, None), 13 | "proxy_certificate": confuse.Optional( 14 | confuse.OneOf( 15 | [ 16 | str, 17 | { 18 | "cert": str, 19 | "key": str, 20 | }, 21 | ] 22 | ), 23 | None, 24 | ), 25 | "token_name": confuse.Optional(str, None), 26 | "token_value": confuse.Optional(str, None), 27 | "timeout": confuse.Optional(int, default=60), 28 | "ssl_verify": confuse.Optional(bool, False), 29 | "node": confuse.Optional( 30 | { 31 | "cpufactor": confuse.Optional(float, None), 32 | "memoryminimum": confuse.Optional(int, None), 33 | }, 34 | default={}, 35 | ), 36 | "vm": confuse.Optional( 37 | { 38 | "max_last_backup": int, 39 | }, 40 | default={}, 41 | ), 42 | } 43 | ), 44 | "node": {"cpufactor": float, "memoryminimum": int}, 45 | "vm": {"max_last_backup": int}, 46 | } 47 | 48 | 49 | config = confuse.LazyConfig("pvecontrol", __name__) 50 | 51 | 52 | def set_config(cluster_name): 53 | validconfig = config.get(configtemplate) 54 | logging.debug("configuration is %s", validconfig) 55 | 56 | # FIXME: find a cleaner way to fetch the configuration of the right cluster 57 | # Maybe rework the configuration completely with a dict 58 | clusterconfig = False 59 | for c in validconfig.clusters: 60 | if c.name == cluster_name: 61 | clusterconfig = c 62 | if not clusterconfig: 63 | logging.error('No such cluster "%s"', cluster_name) 64 | sys.exit(1) 65 | logging.debug("clusterconfig is %s", clusterconfig) 66 | 67 | for k, v in validconfig.node.items(): 68 | clusterconfig.node[k] = clusterconfig.node[k] if clusterconfig.node.get(k) else v 69 | 70 | for k, v in validconfig.vm.items(): 71 | clusterconfig.vm[k] = clusterconfig.vm[k] if clusterconfig.vm.get(k) else v 72 | 73 | return clusterconfig 74 | -------------------------------------------------------------------------------- /src/pvecontrol/config_default.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | clusters: 4 | - name: cluster1 5 | host: 127.0.0.1 6 | user: pvecontrol@pve 7 | timeout: 60 8 | ssl_verify: false 9 | # password: somerandomsecret 10 | # token_name: mytokenname 11 | # token_value: somemorerandomsecret 12 | 13 | node: 14 | cpufactor: 2.5 15 | memoryminimum: 8589934592 16 | 17 | vm: 18 | max_last_backup: 1500 19 | -------------------------------------------------------------------------------- /src/pvecontrol/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/enix/pvecontrol/e86f30d4bc0c30125ef95e37e729f7a1d3bb2bf0/src/pvecontrol/models/__init__.py -------------------------------------------------------------------------------- /src/pvecontrol/models/backup_job.py: -------------------------------------------------------------------------------- 1 | class PVEBackupJob: 2 | """Proxmox VE Backup Job""" 3 | 4 | _default_kwargs = { 5 | "all": 0, 6 | "compress": None, 7 | "enabled": None, 8 | "exclude": "", 9 | "fleecing": None, 10 | "mode": None, 11 | "next-run": None, 12 | "node": None, 13 | "notes-template": None, 14 | "pool": None, 15 | "prune-backups": None, 16 | "schedule": None, 17 | "storage": None, 18 | "type": None, 19 | "vmid": "", 20 | } 21 | 22 | def __init__(self, backup_id, **kwargs): 23 | self.id = backup_id
24 | 25 | for k, v in self._default_kwargs.items(): 26 | self.__setattr__(k, kwargs.get(k, v)) 27 | 28 | self.all = self.all == 1 29 | self.vmid = self.vmid.split(",") 30 | self.exclude = self.exclude.split(",") 31 | 32 | def __str__(self): 33 | output = f"Vm(s): {self.vmid}\n" + f"Id: {self.id}\n" 34 | for key in self._default_kwargs: 35 | output += f"{key.capitalize()}: {self.__getattribute__(key)}\n" 36 | return output 37 | 38 | def is_selection_matching(self, vm): 39 | if self.node is not None and self.node != vm.node: 40 | return False 41 | if self.pool is not None: 42 | return self.pool == vm.pool 43 | if self.all: 44 | return str(vm.vmid) not in self.exclude 45 | return str(vm.vmid) in self.vmid 46 | -------------------------------------------------------------------------------- /src/pvecontrol/models/cluster.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import fnmatch 3 | import sys 4 | 5 | from proxmoxer import ProxmoxAPI 6 | from requests.exceptions import SSLError 7 | 8 | from pvecontrol.utils import defaulter, run_auth_commands 9 | from pvecontrol.models.node import PVENode 10 | from pvecontrol.models.storage import PVEStorage 11 | from pvecontrol.models.task import PVETask 12 | from pvecontrol.models.backup_job import PVEBackupJob 13 | from pvecontrol.models.volume import PVEVolume 14 | from pvecontrol.config import set_config 15 | 16 | 17 | class PVECluster: 18 | """Proxmox VE Cluster""" 19 | 20 | def __init__(self, name, host, config, timeout, verify_ssl=False, **auth): 21 | try: 22 | self.api = ProxmoxAPI(host, timeout=timeout, verify_ssl=verify_ssl, **auth) 23 | except SSLError as e: 24 | print(e) 25 | sys.exit(1) 26 | self.name = name 27 | self.config = config 28 | self._tasks = None 29 | self._ha = None 30 | self._backups = None 31 | self._backup_jobs = None 32 | self._initstatus() 33 | 34 | def _initstatus(self): 35 | self.status = self.api.cluster.status.get() 36 | self.resources = self.api.cluster.resources.get() 37 | 38 | self.nodes = [] 39 | for node in self.resources_nodes: 40 | self.nodes.append( 41 | PVENode( 42 | self, 43 | node["node"], 44 | node["status"], 45 | kwargs=node, 46 | ) 47 | ) 48 | 49 | self.storages = [] 50 | for storage in self.resources_storages: 51 | self.storages.append( 52 | PVEStorage(self.api, storage.pop("node"), storage.pop("id"), storage.pop("shared"), **storage) 53 | ) 54 | 55 | @staticmethod 56 | def create_from_config(cluster_name): 57 | logging.info("Proxmox cluster: %s", cluster_name) 58 | 59 | clusterconfig = set_config(cluster_name) 60 | auth = run_auth_commands(clusterconfig) 61 | proxmoxcluster = PVECluster( 62 | clusterconfig.name, 63 | clusterconfig.host, 64 | config={"node": clusterconfig.node, "vm": clusterconfig.vm}, 65 | verify_ssl=clusterconfig.ssl_verify, 66 | timeout=clusterconfig.timeout, 67 | **auth, 68 | ) 69 | 70 | return proxmoxcluster 71 | 72 | @property 73 | def ha(self): 74 | if self._ha is not None: 75 | return self._ha 76 | 77 | self._ha = { 78 | "groups": self.api.cluster.ha.groups.get(), 79 | "manager_status": self.api.cluster.ha.status.manager_status.get(), 80 | "resources": self.api.cluster.ha.resources.get(), 81 | } 82 | return self._ha 83 | 84 | @property 85 | def tasks(self): 86 | if self._tasks is not None: 87 | return self._tasks 88 | 89 | self._tasks = [] 90 | for task in self.api.cluster.tasks.get(): 91 | logging.debug("Get task information: %s", (str(task))) 92 | self._tasks.append(PVETask(self.api, task["upid"])) 93 | return
self._tasks 94 | 95 | def refresh(self): 96 | self._initstatus() 97 | 98 | # force tasks refresh 99 | self._tasks = None 100 | _ = self.tasks 101 | 102 | # force ha information refresh 103 | self._ha = None 104 | _ = self.ha 105 | 106 | def __str__(self): 107 | output = f"Proxmox VE Cluster {self.name}\n" 108 | output += f" Status: {self.status}\n" 109 | output += f" Resources: {self.resources}\n" 110 | output += " Nodes:\n" 111 | for node in self.nodes: 112 | output += f"{node}\n" 113 | return output 114 | 115 | @property 116 | def vms(self): 117 | """Return all vms on this cluster""" 118 | vms = [] 119 | for node in self.nodes: 120 | for vm in node.vms: 121 | vms.append(vm) 122 | return vms 123 | 124 | def find_node(self, nodename): 125 | """Find a node running on this cluster by its exact name""" 126 | for node in self.nodes: 127 | if node.node == nodename: 128 | return node 129 | return False 130 | 131 | def find_nodes(self, pattern): 132 | """Find a list of nodes running on this cluster based on a glob pattern""" 133 | nodes = [] 134 | for node in self.nodes: 135 | if fnmatch.fnmatchcase(node.node, pattern): 136 | nodes.append(node) 137 | return nodes 138 | 139 | def find_task(self, upid): 140 | """Return a task by upid""" 141 | for task in self.tasks: 142 | if task.upid == upid: 143 | return task 144 | return False 145 | 146 | @property 147 | def is_healthy(self): 148 | return bool([item for item in self.status if item.get("type") == "cluster"][0]["quorate"]) 149 | 150 | def get_vm(self, vm_id): 151 | if isinstance(vm_id, str): 152 | vm_id = int(vm_id) 153 | 154 | result = None 155 | node_name = None 156 | for vm in self.resources_vms: 157 | if vm["vmid"] == vm_id: 158 | node_name = vm["node"] 159 | break 160 | 161 | for node in self.nodes: 162 | if node.node == node_name: 163 | result = [v for v in node.vms if v.vmid == vm_id][0] 164 | break 165 | 166 | return result 167 | 168 | @property 169 | def resources_nodes(self): 170 | return [ 171 | defaulter(resource, ["cpu", "mem", "maxmem", "maxcpu", "disk", "maxdisk"], 0) 172 | for resource in self.resources 173 | if resource["type"] == "node" 174 | ] 175 | 176 | @property 177 | def resources_vms(self): 178 | return [ 179 | defaulter(resource, ["maxcpu", "maxdisk", "maxmem"], 0) 180 | for resource in self.resources 181 | if resource["type"] == "qemu" 182 | ] 183 | 184 | @property 185 | def resources_storages(self): 186 | return [ 187 | defaulter(resource, ["disk", "maxdisk"], 0) for resource in self.resources if resource["type"] == "storage" 188 | ] 189 | 190 | def get_storage(self, storage_name): 191 | return next(filter(lambda s: s.storage == storage_name, self.storages), None) 192 | 193 | @property 194 | def cpu_metrics(self): 195 | nodes = self.resources_nodes 196 | total_cpu = sum(node["maxcpu"] for node in nodes) 197 | total_cpu_usage = sum(node["cpu"] for node in nodes) 198 | total_cpu_allocated = sum(node.allocatedcpu for node in self.nodes) 199 | cpu_percent = total_cpu_usage / total_cpu * 100 if total_cpu else 0 200 | 201 | return { 202 | "total": total_cpu, 203 | "usage": total_cpu_usage, 204 | "allocated": total_cpu_allocated, 205 | "percent": cpu_percent, 206 | } 207 | 208 | @property 209 | def memory_metrics(self): 210 | nodes = self.resources_nodes 211 | total_memory = sum(node["maxmem"] for node in nodes) 212 | total_memory_usage = sum(node["mem"] for node in nodes) 213 | total_memory_allocated = sum(node.allocatedmem for node in self.nodes) 214 | memory_percent = total_memory_usage / total_memory * 100 if total_memory else 0 215 |
216 | return { 217 | "total": total_memory, 218 | "usage": total_memory_usage, 219 | "allocated": total_memory_allocated, 220 | "percent": memory_percent, 221 | } 222 | 223 | @property 224 | def disk_metrics(self): 225 | storages = self.resources_storages 226 | total_disk = sum(node["maxdisk"] for node in storages) 227 | total_disk_usage = sum(node["disk"] for node in storages) 228 | disk_percent = total_disk_usage / total_disk * 100 if total_disk else 0 229 | 230 | return { 231 | "total": total_disk, 232 | "usage": total_disk_usage, 233 | "percent": disk_percent, 234 | } 235 | 236 | @property 237 | def metrics(self): 238 | return { 239 | "cpu": self.cpu_metrics, 240 | "memory": self.memory_metrics, 241 | "disk": self.disk_metrics, 242 | } 243 | 244 | @property 245 | def backups(self): 246 | if self._backups is None: 247 | self._backups = [] 248 | for item in PVEStorage.get_grouped_list(self): 249 | logging.debug("Find storage: %s", (str(item))) 250 | for backup in item["storage"].get_content("backup"): 251 | logging.debug("New vm backup: %s", (str(backup))) 252 | self._backups.append( 253 | PVEVolume(backup.pop("volid"), backup.pop("format"), backup.pop("size"), **backup) 254 | ) 255 | return self._backups 256 | 257 | @property 258 | def backup_jobs(self): 259 | if self._backup_jobs is None: 260 | self._backup_jobs = [] 261 | for backup_job in self.api.cluster.backup.get(): 262 | logging.debug("New backup job: %s", (str(backup_job))) 263 | self._backup_jobs.append(PVEBackupJob(backup_job.pop("id"), **backup_job)) 264 | return self._backup_jobs 265 | -------------------------------------------------------------------------------- /src/pvecontrol/models/node.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from pvecontrol.utils import defaulter 4 | from pvecontrol.models.vm import PVEVm, VmStatus 5 | 6 | 7 | COLUMNS = ["node", "status", "allocatedcpu", "maxcpu", "mem", "allocatedmem", "maxmem"] 8 | 9 | 10 | class NodeStatus(Enum): 11 | UNKNOWN = 0 12 | ONLINE = 1 13 | OFFLINE = 2 14 | 15 | 16 | class PVENode: 17 | """A proxmox VE Node""" 18 | 19 | def __init__(self, cluster, node, status, kwargs=None): 20 | if not kwargs: 21 | kwargs = {} 22 | 23 | self.node = node 24 | self.status = NodeStatus[status.upper()] 25 | self.cluster = cluster 26 | self.cpu = kwargs.get("cpu", 0) 27 | self.allocatedcpu = 0 28 | self.maxcpu = kwargs.get("maxcpu", 0) 29 | self.mem = kwargs.get("mem", 0) 30 | self.allocatedmem = 0 31 | self.maxmem = kwargs.get("maxmem", 0) 32 | self.disk = kwargs.get("disk", 0) 33 | self.maxdisk = kwargs.get("maxdisk", 0) 34 | self._init_vms() 35 | self._init_allocatedmem() 36 | self._init_allocatedcpu() 37 | 38 | def __str__(self): 39 | output = "Node: " + self.node + "\n" 40 | output += "Status: " + str(self.status) + "\n" 41 | output += f"CPU: {self.cpu}/{self.allocatedcpu}/{self.maxcpu}\n" 42 | output += f"Mem: {self.mem}/{self.allocatedmem}/{self.maxmem}\n" 43 | output += f"Disk: {self.disk}/{self.maxdisk}\n" 44 | output += "VMs: \n" 45 | for vm in self.vms: 46 | output += f" - {vm}\n" 47 | return output 48 | 49 | def _init_vms(self): 50 | self.vms = [] 51 | if self.status == NodeStatus.ONLINE: 52 | self.vms = [PVEVm(self.api, self.node, vm["vmid"], vm["status"], vm) for vm in self.resources_vms] 53 | 54 | def _init_allocatedmem(self): 55 | """Compute the amount of memory allocated to running VMs""" 56 | self.allocatedmem = 0 57 | for vm in self.vms: 58 | if vm.status != VmStatus.RUNNING: 59 | continue 60 | 
self.allocatedmem += vm.maxmem 61 | 62 | def _init_allocatedcpu(self): 63 | """Compute the amount of cpu allocated to running VMs""" 64 | self.allocatedcpu = 0 65 | for vm in self.vms: 66 | if vm.status != VmStatus.RUNNING: 67 | continue 68 | self.allocatedcpu += vm.cpus 69 | 70 | @property 71 | def api(self): 72 | return self.cluster.api 73 | 74 | @property 75 | def resources(self): 76 | return [resource for resource in self.cluster.resources if resource.get("node") == self.node] 77 | 78 | @property 79 | def resources_vms(self): 80 | return [ 81 | defaulter(resource, ["maxcpu", "maxdisk", "maxmem"], 0) 82 | for resource in self.resources 83 | if resource["type"] == "qemu" 84 | ] 85 | 86 | # def __contains__(self, item): 87 | # """Check if a VM is running on this node""" 88 | # for vm in self.vms: 89 | # if vm.vmid == item: 90 | # return True 91 | # return False 92 | 93 | @property 94 | def templates(self): 95 | return [vm for vm in self.vms if vm.template] 96 | -------------------------------------------------------------------------------- /src/pvecontrol/models/storage.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from pvecontrol.models.volume import PVEVolume 4 | 5 | 6 | STORAGE_SHARED_ENUM = ["local", "shared"] 7 | COLUMNS = [ 8 | "storage", 9 | "nodes", 10 | "shared", 11 | "usage", 12 | "maxdisk", 13 | "disk", 14 | "plugintype", 15 | "status", 16 | ] 17 | 18 | 19 | class StorageShared(Enum): 20 | LOCAL = 0 21 | SHARED = 1 22 | 23 | 24 | class PVEStorage: 25 | """Proxmox VE Storage""" 26 | 27 | _default_kwargs = { 28 | "storage": None, 29 | "maxdisk": None, 30 | "disk": None, 31 | "plugintype": None, 32 | "status": None, 33 | "test": None, 34 | } 35 | 36 | def __init__(self, api, node, storage_id, shared, **kwargs): 37 | self.id = storage_id 38 | self.short_id = storage_id.split("/")[-1] 39 | self.node = node 40 | self._api = api 41 | self._content = {} 42 | self._details = {} 43 | 44 | self.shared = STORAGE_SHARED_ENUM[shared] 45 | 46 | for k, v in self._default_kwargs.items(): 47 | self.__setattr__(k, kwargs.get(k, v)) 48 | 49 | @property 50 | def details(self): 51 | if self._details is None: 52 | self._details = self._api.storage(self.short_id).get() 53 | 54 | return self._details 55 | 56 | @staticmethod 57 | def get_grouped_list(proxmox): 58 | storages = {} 59 | for storage in proxmox.storages: 60 | value = {"storage": storage, "nodes": [], "usage": f"{storage.percentage:.1f}%"} 61 | if StorageShared[storage.shared.upper()] == StorageShared.SHARED: 62 | storages[storage.storage] = storages.get(storage.storage, value) 63 | storages[storage.storage]["nodes"] += [storage.node] 64 | else: 65 | storages[storage.id] = value 66 | storages[storage.id]["nodes"] += [storage.node] 67 | 68 | return storages.values() 69 | 70 | @staticmethod 71 | def get_flattened_grouped_list(proxmox): 72 | storages = PVEStorage.get_grouped_list(proxmox) 73 | 74 | for item in storages: 75 | storage = item.pop("storage").__dict__ 76 | storage.pop("node") 77 | item.update(storage) 78 | 79 | for storage in storages: 80 | storage["nodes"] = ", ".join(storage["nodes"]) 81 | 82 | return storages 83 | 84 | @property 85 | def percentage(self): 86 | return self.disk / self.maxdisk * 100 if self.maxdisk else 0 87 | 88 | @property 89 | def images(self): 90 | images = [] 91 | for image in self.get_content("images"): 92 | images.append(PVEVolume(image.pop("volid"), image.pop("format"), image.pop("size"), **image)) 93 | return images 94 | 95 | def 
get_content(self, content_type=None): 96 | if content_type not in self._content: 97 | self._content[content_type] = ( 98 | self._api.nodes(self.node).storage(self.short_id).content.get(content=content_type) 99 | ) 100 | return self._content[content_type] 101 | 102 | def __str__(self): 103 | output = f"Node: {self.node}\n" + f"Id: {self.id}\n" 104 | for key in self._default_kwargs: 105 | output += f"{key.capitalize()}: {self.__getattribute__(key)}\n" 106 | return output 107 | -------------------------------------------------------------------------------- /src/pvecontrol/models/task.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | import proxmoxer.core 3 | from proxmoxer.tools import Tasks 4 | 5 | 6 | COLUMNS = [ 7 | "upid", 8 | "exitstatus", 9 | "node", 10 | "type", 11 | "starttime", 12 | "endtime", 13 | "runningstatus", 14 | ] 15 | 16 | 17 | class TaskRunningStatus(Enum): 18 | RUNNING = 0 19 | STOPPED = 1 20 | VANISHED = 2 21 | 22 | 23 | class PVETask: 24 | """Proxmox VE Task""" 25 | 26 | _api = None 27 | 28 | def __init__(self, api, upid): 29 | task = Tasks.decode_upid(upid) 30 | 31 | self._api = api 32 | self.upid = upid 33 | self.node = task["node"] 34 | self.starttime = task["starttime"] 35 | self.type = task["type"] 36 | self.user = task["user"] 37 | self.runningstatus = TaskRunningStatus.VANISHED 38 | self.endtime = 0 39 | self.exitstatus = "UNK" 40 | 41 | self.refresh() 42 | 43 | def log(self, limit=0, start=0): 44 | return self._api.nodes(self.node).tasks(self.upid).log.get(limit=limit, start=start) 45 | 46 | def running(self): 47 | return self.runningstatus == TaskRunningStatus.RUNNING 48 | 49 | def vanished(self): 50 | return self.runningstatus == TaskRunningStatus.VANISHED 51 | 52 | def refresh(self): 53 | # This is buggy. Replace with a try/except?
54 | # if self.node != NodeStatus.online: 55 | # return 56 | try: 57 | status = self._api.nodes(self.node).tasks(self.upid).status.get() 58 | # Some task information can vanish over time (task status files are removed from the node filesystem) 59 | # In this case the API returns an error, so we consider the task vanished and don't fetch more information 60 | except proxmoxer.core.ResourceException: 61 | pass 62 | else: 63 | self.runningstatus = TaskRunningStatus[status.get("status", "stopped").upper()] 64 | self.endtime = status.get("endtime", 0) 65 | self.exitstatus = status.get("exitstatus", "") 66 | 67 | def decode_log(self, limit=0, start=0): 68 | log = self.log(limit, start) 69 | return Tasks.decode_log(log) 70 | -------------------------------------------------------------------------------- /src/pvecontrol/models/vm.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | COLUMNS = ["vmid", "name", "status", "node", "cpus", "maxmem", "maxdisk", "tags"] 5 | 6 | 7 | class VmStatus(Enum): 8 | STOPPED = 0 9 | RUNNING = 1 10 | PAUSED = 2 11 | SUSPENDED = 3 12 | POSTMIGRATE = 4 13 | PRELAUNCH = 5 14 | 15 | 16 | class PVEVm: 17 | """Proxmox VE Qemu VM""" 18 | 19 | _api = None 20 | 21 | def __init__(self, api, node, vmid, status, kwargs=None): 22 | if not kwargs: 23 | kwargs = {} 24 | 25 | self.vmid = vmid 26 | self.status = VmStatus[status.upper()] 27 | self.node = node 28 | self._api = api 29 | 30 | self.name = kwargs.get("name", "") 31 | self.lock = kwargs.get("lock", "") 32 | self.cpus = kwargs.get("maxcpu", 0) 33 | self.maxdisk = kwargs.get("maxdisk", 0) 34 | self.maxmem = kwargs.get("maxmem", 0) 35 | self.uptime = kwargs.get("uptime", 0) 36 | self.tags = set(filter(None, kwargs.get("tags", "").split(";"))) 37 | self.template = kwargs.get("template", 0) 38 | self.pool = kwargs.get("pool", "") 39 | 40 | self._config = None 41 | 42 | @property 43 | def config(self): 44 | if not self._config: 45 | self._config = self._api.nodes(self.node).qemu(self.vmid).config.get() 46 | 47 | return self._config 48 | 49 | def __str__(self): 50 | str_keys = [ 51 | "vmid", 52 | "status", 53 | "name", 54 | "lock", 55 | "cpus", 56 | "maxdisk", 57 | "maxmem", 58 | "uptime", 59 | "tags", 60 | "template", 61 | ] 62 | output = [] 63 | for k in str_keys: 64 | output.append(f"{k}: {getattr(self, k)}") 65 | return ", ".join(output) 66 | 67 | def migrate(self, target, online=False): 68 | options = {} 69 | options["node"] = self.node 70 | options["target"] = target 71 | check = self._api.nodes(self.node).qemu(self.vmid).migrate.get(**options) 72 | # logging.debug("Migration check: %s"%check) 73 | options["online"] = int(online) 74 | if len(check["local_disks"]) > 0: 75 | options["with-local-disks"] = int(True) 76 | 77 | upid = self._api.nodes(self.node).qemu(self.vmid).migrate.post(**options) 78 | return upid 79 | 80 | def get_backup_jobs(self, proxmox): 81 | vm_backup_jobs = [] 82 | for backup_job in proxmox.backup_jobs: 83 | if backup_job.is_selection_matching(self): 84 | vm_backup_jobs.append(backup_job) 85 | return vm_backup_jobs 86 | 87 | def get_backups(self, proxmox): 88 | return [backup for backup in proxmox.backups if backup.vmid == self.vmid] 89 | 90 | def get_last_backup(self, proxmox): 91 | backups = sorted(self.get_backups(proxmox), key=lambda x: x.ctime) 92 | return backups[-1] if len(backups) > 0 else None 93 | -------------------------------------------------------------------------------- /src/pvecontrol/models/volume.py:
-------------------------------------------------------------------------------- 1 | class PVEVolume: 2 | """Proxmox VE Volume""" 3 | 4 | _default_kwargs = { 5 | "content": None, 6 | "ctime": None, 7 | "encrypted": None, 8 | "notes": None, 9 | "parent": None, 10 | "path": None, 11 | "protected": None, 12 | "subtype": None, 13 | "used": None, 14 | "verification": None, 15 | "vmid": None, 16 | } 17 | 18 | def __init__(self, volid, volume_format, size, **kwargs): 19 | self.volid = volid 20 | self.format = volume_format 21 | self.size = size 22 | 23 | for k, v in self._default_kwargs.items(): 24 | self.__setattr__(k, kwargs.get(k, v)) 25 | 26 | def __str__(self): 27 | output = f"Id: {self.volid}\n" 28 | for key in self._default_kwargs: 29 | output += f"{key.capitalize()}: {self.__getattribute__(key)}\n" 30 | return output 31 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/__init__.py: -------------------------------------------------------------------------------- 1 | from .sanitychecks import SanityCheck 2 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/checks.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import Enum 3 | 4 | from pvecontrol.utils import Fonts, terminal_support_utf_8, terminal_support_colors 5 | 6 | 7 | class CheckType(Enum): 8 | HA = "HIGH_AVAILABILITY" 9 | NODE = "NODE" 10 | VM = "VIRTUAL_MACHINE" 11 | STORAGE = "STORAGE" 12 | 13 | 14 | class CheckCode(Enum): 15 | CRIT = "CRITICAL" 16 | WARN = "WARNING" 17 | INFO = "INFO" 18 | OK = "OK" 19 | 20 | 21 | ICONS_UTF8 = { 22 | CheckCode.CRIT.value: "❌", 23 | CheckCode.WARN.value: "⚠️", 24 | CheckCode.INFO.value: "ℹ️", 25 | CheckCode.OK.value: "✅", 26 | } 27 | 28 | ICONS_ASCII = { 29 | CheckCode.CRIT.value: "[CRIT]", 30 | CheckCode.WARN.value: "[WARN]", 31 | CheckCode.INFO.value: "[INFO]", 32 | CheckCode.OK.value: "[OK]", 33 | } 34 | 35 | ICONS_COLORED_ASCII = { 36 | CheckCode.CRIT.value: f"{Fonts.RED}[CRIT]{Fonts.END}", 37 | CheckCode.WARN.value: f"{Fonts.YELLOW}[WARN]{Fonts.END}", 38 | CheckCode.INFO.value: f"{Fonts.BLUE}[INFO]{Fonts.END}", 39 | CheckCode.OK.value: f"{Fonts.GREEN}[OK]{Fonts.END}", 40 | } 41 | 42 | 43 | def get_icons(colors=True, unicode=True): 44 | if unicode and terminal_support_utf_8(): 45 | return ICONS_UTF8 46 | if colors and terminal_support_colors(): 47 | return ICONS_COLORED_ASCII 48 | return ICONS_ASCII 49 | 50 | 51 | class CheckMessage: 52 | def __init__(self, code: CheckCode, message): 53 | self.code = code 54 | self.message = message 55 | 56 | def display(self, padding_max_size, colors=True, unicode=True): 57 | icon = get_icons(colors, unicode)[self.code.value] 58 | padding = padding_max_size - len(self.message) 59 | msg = f"- {self.message}{padding * '.'}{icon}" 60 | print(msg) 61 | 62 | def __len__(self): 63 | return len(self.message) 64 | 65 | 66 | class Check(ABC): 67 | 68 | type = "" 69 | name = "" 70 | 71 | def __init__(self, proxmox, messages=None, colors=True, unicode=True): 72 | if messages is None: 73 | messages = [] 74 | self.proxmox = proxmox 75 | self.messages = messages 76 | self._colors = colors 77 | self._unicode = unicode 78 | 79 | @abstractmethod 80 | def run(self): 81 | pass 82 | 83 | @property 84 | def status(self): 85 | """Define status by the most important status in messages""" 86 | status = [] 87 | for msg in self.messages: 88 | # exit early if the most important code is
found. 89 | if CheckCode.CRIT == msg.code: 90 | return CheckCode.CRIT 91 | status.append(msg.code) 92 | 93 | if CheckCode.WARN in status: 94 | return CheckCode.WARN 95 | 96 | if CheckCode.INFO in status: 97 | return CheckCode.INFO 98 | 99 | return CheckCode.OK 100 | 101 | def add_messages(self, messages): 102 | if isinstance(messages, CheckMessage): 103 | self.messages.append(messages) 104 | elif isinstance(messages, list): 105 | self.messages += messages 106 | 107 | def display(self, padding_max_size): 108 | if self._colors and terminal_support_colors(): 109 | name = f"{Fonts.BOLD}{self.name}{Fonts.END}" 110 | else: 111 | name = f"{self.name}" 112 | print(f"{name}: {get_icons(self._colors, self._unicode)[self.status.value]}\n") 113 | 114 | for msg in self.messages: 115 | msg.display(padding_max_size, colors=self._colors, unicode=self._unicode) 116 | print() 117 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/sanitychecks.py: -------------------------------------------------------------------------------- 1 | from pvecontrol.models.cluster import PVECluster 2 | from pvecontrol.sanitycheck.checks import CheckCode 3 | from pvecontrol.sanitycheck.tests import DEFAULT_CHECKS, DEFAULT_CHECK_IDS 4 | 5 | 6 | class SanityCheck: 7 | 8 | def __init__(self, proxmox: PVECluster, colors=True, unicode=True): 9 | self._proxmox = proxmox 10 | self._checks = [] 11 | self._colors = colors 12 | self._unicode = unicode 13 | 14 | def run(self, checks): 15 | if not checks: 16 | checks = DEFAULT_CHECK_IDS 17 | 18 | for key in checks: 19 | check = DEFAULT_CHECKS[key](self._proxmox, colors=self._colors, unicode=self._unicode) 20 | check.run() 21 | self._checks.append(check) 22 | 23 | return self.get_exit_code() 24 | 25 | def get_exit_code(self): 26 | for check in self._checks: 27 | # exit early if the most important code is found.
28 | if CheckCode.CRIT == check.status: 29 | return 1 30 | return 0 31 | 32 | def _get_longest_message(self): 33 | size = 0 34 | for check in self._checks: 35 | for msg in check.messages: 36 | size = max(size, len(msg)) 37 | return size + 1 38 | 39 | def display_footer(self): 40 | title = "SUMMARY" 41 | size = self._get_longest_message() 42 | dash_size = int((size + 2 - len(title)) / 2) 43 | print(f"{dash_size*'-'} {title} {dash_size*'-'}\n") 44 | print(f"Total checks: {len(self._checks)}") 45 | print(f"Critical: {len([check for check in self._checks if check.status == CheckCode.CRIT])}") 46 | print(f"Warning: {len([check for check in self._checks if check.status == CheckCode.WARN])}") 47 | print(f"OK: {len([check for check in self._checks if check.status == CheckCode.OK])}") 48 | print(f"Info: {len([check for check in self._checks if check.status == CheckCode.INFO])}") 49 | 50 | def display(self): 51 | size = self._get_longest_message() 52 | current_type = None 53 | for check in self._checks: 54 | if current_type != check.type: 55 | current_type = check.type 56 | dash_size = int((size + 2 - len(check.type.value)) / 2) 57 | print(f"{dash_size*'-'} {check.type.value} {dash_size*'-'}\n") 58 | check.display(size) 59 | print("") 60 | self.display_footer() 61 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/tests/__init__.py: -------------------------------------------------------------------------------- 1 | from .nodes import Nodes 2 | from .ha_groups import HaGroups 3 | from .ha_vms import HaVms 4 | from .vm import VmsStartOnBoot, DiskUnused 5 | from .vm_backups import VmBackups 6 | 7 | DEFAULT_CHECKS = { 8 | Nodes.id: Nodes, 9 | HaGroups.id: HaGroups, 10 | HaVms.id: HaVms, 11 | VmsStartOnBoot.id: VmsStartOnBoot, 12 | VmBackups.id: VmBackups, 13 | DiskUnused.id: DiskUnused, 14 | } 15 | 16 | DEFAULT_CHECK_IDS = DEFAULT_CHECKS.keys() 17 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/tests/ha_groups.py: -------------------------------------------------------------------------------- 1 | from pvecontrol.sanitycheck.checks import Check, CheckType, CheckMessage, CheckCode 2 | 3 | 4 | class HaGroups(Check): 5 | 6 | id = "ha_groups" 7 | type = CheckType.HA 8 | name = "Check HA groups" 9 | 10 | def run(self): 11 | for group in self.proxmox.ha["groups"]: 12 | num_nodes = len(group["nodes"].split(",")) 13 | if num_nodes < 2: 14 | msg = f"Group {group['group']} contains only {num_nodes} node" 15 | self.add_messages(CheckMessage(CheckCode.CRIT, msg)) 16 | 17 | if not self.messages: 18 | msg = "HA groups checked" 19 | self.add_messages(CheckMessage(CheckCode.OK, msg)) 20 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/tests/ha_vms.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from pvecontrol.models.storage import StorageShared 4 | from pvecontrol.sanitycheck.checks import Check, CheckType, CheckMessage, CheckCode 5 | 6 | 7 | class HaVms(Check): 8 | 9 | id = "ha_vms" 10 | type = CheckType.HA 11 | name = "Check VMs in a HA group" 12 | 13 | def run(self): 14 | ha_resources = [r for r in self.proxmox.ha["resources"] if r["type"] in ["vm"]] 15 | ha_vms = [] 16 | for resource in ha_resources: 17 | vmid = resource["sid"].split(":")[1] # "sid = vm:100" 18 | if resource["type"] == "vm": 19 | ha_vms.append(self.proxmox.get_vm(vmid)) 20 | 21 |
self.add_messages(self._check_disk_ha_consistency(ha_vms)) 22 | self.add_messages(self._check_cpu_ha_consistency(ha_vms)) 23 | 24 | if not self.messages: 25 | msg = "HA VMs checked" 26 | self.add_messages(CheckMessage(CheckCode.OK, msg)) 27 | 28 | def _check_disk_ha_consistency(self, ha_vms): 29 | messages = [] 30 | # Values are quite hard to extract from resource keys when they refer to a disk 31 | regex = r"^(.*):(vm|base)-[0-9]+-(disk|cloudinit).*" 32 | vms_not_consistent = [] 33 | for vm in ha_vms: 34 | result = {"name": vm.name, "node": vm.node, "disks": []} 35 | for k, v in vm.config.items(): 36 | if not isinstance(v, str): 37 | continue 38 | if regex_result := re.search(regex, v): 39 | storage = self.proxmox.get_storage(regex_result.group(1)) 40 | if storage is not None and StorageShared[storage.shared.upper()] != StorageShared.SHARED: 41 | result["disks"].append(k) 42 | if result["disks"]: 43 | vms_not_consistent.append(result) 44 | 45 | for vm in vms_not_consistent: 46 | msg = f"Node '{vm['node']}' has VM '{vm['name']}' with disk(s) '{', '.join(vm['disks'])}' not on shared storage" 47 | messages.append(CheckMessage(CheckCode.CRIT, msg)) 48 | 49 | return messages 50 | 51 | def _check_cpu_ha_consistency(self, ha_vms): 52 | messages = [] 53 | for vm in ha_vms: 54 | if vm.config.get("cpu", "") == "host": 55 | msg = f"Node '{vm.node}' has VM '{vm.name}' with cpu type host" 56 | messages.append(CheckMessage(CheckCode.WARN, msg)) 57 | else: 58 | msg = f"Node '{vm.node}' has VM '{vm.name}' with cpu type {vm.config.get('cpu', 'Default')}" 59 | messages.append(CheckMessage(CheckCode.OK, msg)) 60 | return messages 61 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/tests/nodes.py: -------------------------------------------------------------------------------- 1 | from pvecontrol.sanitycheck.checks import Check, CheckCode, CheckType, CheckMessage 2 | 3 | 4 | class Nodes(Check): 5 | 6 | id = "nodes" 7 | type = CheckType.NODE 8 | name = "Check Node capacity" 9 | 10 | def run(self): 11 | self._check_cpu_overcommit() 12 | self._check_mem_overcommit() 13 | 14 | def _check_mem_overcommit(self): 15 | for node in self.proxmox.nodes: 16 | if self._mem_is_overcommited(node.maxmem, self.proxmox.config["node"]["memoryminimum"], node.allocatedmem): 17 | msg = f"Node '{node.node}' is in mem overcommit status: {node.allocatedmem} allocated but {node.maxmem} available" 18 | self.add_messages(CheckMessage(CheckCode.CRIT, msg)) 19 | else: 20 | msg = f"Node '{node.node}' isn't in mem overcommit" 21 | self.add_messages(CheckMessage(CheckCode.OK, msg)) 22 | 23 | def _check_cpu_overcommit(self): 24 | for node in self.proxmox.nodes: 25 | if self._cpu_is_overcommited(node.maxcpu, self.proxmox.config["node"]["cpufactor"], node.allocatedcpu): 26 | msg = f"Node '{node.node}' is in cpu overcommit status: {node.allocatedcpu} allocated but {node.maxcpu} available" 27 | self.add_messages(CheckMessage(CheckCode.WARN, msg)) 28 | else: 29 | msg = f"Node '{node.node}' isn't in cpu overcommit" 30 | self.add_messages(CheckMessage(CheckCode.OK, msg)) 31 | 32 | def _cpu_is_overcommited(self, maxcpu, cpufactor, allocated_cpu): 33 | return (maxcpu * cpufactor) <= allocated_cpu 34 | 35 | def _mem_is_overcommited(self, max_mem, min_mem, allocated_mem): 36 | return (allocated_mem + min_mem) >= max_mem 37 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/tests/vm.py:
-------------------------------------------------------------------------------- 1 | from pvecontrol.models.vm import VmStatus 2 | from pvecontrol.models.storage import PVEStorage, STORAGE_SHARED_ENUM 3 | from pvecontrol.sanitycheck.checks import Check, CheckCode, CheckType, CheckMessage 4 | 5 | 6 | class VmsStartOnBoot(Check): 7 | 8 | id = "vms_start_on_boot" 9 | type = CheckType.VM 10 | name = "Check VMs 'startonboot' option" 11 | 12 | def run(self): 13 | vms = [vm for vm in self.proxmox.vms if vm.template == 0] 14 | for vm in vms: 15 | self._check_vm_startonboot_option(vm) 16 | 17 | def _check_vm_startonboot_option(self, vm): 18 | if self._vm_has_startonboot_enabled(vm): 19 | self._check_vm_startonboot_enabled(vm) 20 | else: 21 | self._check_vm_startonboot_disabled(vm) 22 | 23 | def _vm_has_startonboot_enabled(self, vm): 24 | return vm.config.get("onboot", 0) == 1 25 | 26 | def _check_vm_startonboot_enabled(self, vm): 27 | if vm.status == VmStatus.RUNNING: 28 | msg = f"VM '{vm.vmid}/{vm.name}' has a consistent 'startonboot' option" 29 | self.add_messages(CheckMessage(CheckCode.OK, msg)) 30 | elif vm.status == VmStatus.STOPPED: 31 | msg = f"VM '{vm.vmid}/{vm.name}' is stopped but 'startonboot' is set to true" 32 | self.add_messages(CheckMessage(CheckCode.CRIT, msg)) 33 | 34 | def _check_vm_startonboot_disabled(self, vm): 35 | if vm.status == VmStatus.STOPPED: 36 | msg = f"VM '{vm.vmid}/{vm.name}' has a consistent 'startonboot' option" 37 | self.add_messages(CheckMessage(CheckCode.OK, msg)) 38 | elif vm.status == VmStatus.RUNNING: 39 | msg = f"VM '{vm.vmid}/{vm.name}' is running but 'startonboot' is set to false" 40 | self.add_messages(CheckMessage(CheckCode.CRIT, msg)) 41 | 42 | 43 | class DiskUnused(Check): 44 | 45 | id = "disk_unused" 46 | type = CheckType.STORAGE 47 | name = "Check for unused disks" 48 | 49 | def run(self): 50 | for vm in self.proxmox.vms: 51 | self._check_vm_disk_is_unused(vm) 52 | 53 | for storage in self.proxmox.storages: 54 | if storage.plugintype == "s3": 55 | # in the Enix-specific case, we don't want to check S3 storage 56 | continue 57 | 58 | if storage.shared == STORAGE_SHARED_ENUM[1]: 59 | self._check_shared_storage_disk_is_unused(storage) 60 | elif storage.shared == STORAGE_SHARED_ENUM[0]: 61 | self._check_local_storage_disk_is_unused(storage) 62 | 63 | def _check_vm_disk_is_unused(self, vm): 64 | for key in vm.config.keys(): 65 | # keys are like unused[n], e.g. unused0, unused1 ...
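# a detached disk leaves an entry such as "unused0": "local:100/vm-100-disk-1.qcow2"
# (volume ID format as seen in the test fixtures further down)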
66 | if "unused" not in key: 67 | continue 68 | msg = f"Disk '{key}' is not used on vm {vm.vmid}/{vm.name}" 69 | self.add_messages(CheckMessage(CheckCode.CRIT, msg)) 70 | 71 | def _check_local_storage_disk_is_unused(self, storage: PVEStorage): 72 | node = self.proxmox.find_node(storage.node) 73 | node_vms_ids = [vm.vmid for vm in node.vms] 74 | 75 | images = storage.images 76 | 77 | unused_images = [image for image in images if image.vmid not in node_vms_ids] 78 | 79 | msg = f"Storage '{storage.node}/{storage.storage}' has {len(images) - len(unused_images)}/{len(images)} disk used" 80 | code = CheckCode.WARN if len(unused_images) > 0 else CheckCode.OK 81 | self.add_messages(CheckMessage(code, msg)) 82 | 83 | if len(unused_images) == 0: 84 | return 85 | 86 | for image in unused_images: 87 | msg = f"Disk '{storage.node}/{image.volid}' is not used, vm {image.vmid} doesn't exist on node" 88 | self.add_messages(CheckMessage(CheckCode.CRIT, msg)) 89 | 90 | def _check_shared_storage_disk_is_unused(self, storage: PVEStorage): 91 | images = storage.images 92 | 93 | # check that the VM referenced by image.vmid exists in the cluster 94 | unused_images = [image for image in images if self.proxmox.get_vm(image.vmid) is None] 95 | 96 | for image in unused_images: 97 | msg = f"Disk '{storage.node}/{image.volid}' is not used, vm {image.vmid} doesn't exist" 98 | self.add_messages(CheckMessage(CheckCode.CRIT, msg)) 99 | -------------------------------------------------------------------------------- /src/pvecontrol/sanitycheck/tests/vm_backups.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | from pvecontrol.sanitycheck.checks import Check, CheckCode, CheckType, CheckMessage 3 | 4 | 5 | class VmBackups(Check): 6 | 7 | id = "vm_backups" 8 | type = CheckType.VM 9 | name = "Check that VMs are backed up on a regular basis" 10 | 11 | def run(self): 12 | backed_up_vms = self._check_is_backed_up() 13 | self._check_backup_ran_recently(backed_up_vms) 14 | 15 | def _check_is_backed_up(self): 16 | backed_up_vms = [] 17 | for vm in self.proxmox.vms: 18 | vm_backup_jobs = vm.get_backup_jobs(self.proxmox) 19 | vm_enabled_backup_ids = [backup.id for backup in vm_backup_jobs if backup.enabled == 1] 20 | if len(vm_enabled_backup_ids) > 0: 21 | msg = f"Vm {vm.vmid} ({vm.name}) is associated with {len(vm_enabled_backup_ids)} enabled backup job(s)" 22 | self.add_messages(CheckMessage(CheckCode.OK, msg)) 23 | backed_up_vms.append(vm) 24 | else: 25 | msg = f"Vm {vm.vmid} ({vm.name}) is not associated with any backup job" 26 | self.add_messages(CheckMessage(CheckCode.WARN, msg)) 27 | return backed_up_vms 28 | 29 | def _check_backup_ran_recently(self, vms): 30 | minutes_ago = self.proxmox.config["vm"]["max_last_backup"] 31 | 32 | for vm in vms: 33 | last_backup = vm.get_last_backup(self.proxmox) 34 | if last_backup is None: 35 | message = CheckMessage(CheckCode.WARN, f"Vm {vm.vmid} ({vm.name}) has never been backed up") 36 | self.add_messages(message) 37 | continue 38 | last_backup_time = datetime.fromtimestamp(last_backup.ctime) 39 | last_backup_time_str = last_backup_time.strftime("%Y-%m-%d %H:%M:%S") 40 | if last_backup_time > datetime.now() - timedelta(minutes=minutes_ago): 41 | message = f"Vm {vm.vmid} ({vm.name}) last backup is recent enough ({last_backup_time_str})" 42 | self.add_messages(CheckMessage(CheckCode.OK, message)) 43 | else: 44 | message = f"Vm {vm.vmid} ({vm.name}) last backup is too old ({last_backup_time_str})" 45 | 
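# a stale backup is reported as CRIT, so any check ending up in CRIT status
# also makes SanityCheck.get_exit_code() return 1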
self.add_messages(CheckMessage(CheckCode.CRIT, message)) 46 | -------------------------------------------------------------------------------- /src/pvecontrol/utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | import sys 4 | import re 5 | import curses 6 | import json 7 | import subprocess 8 | 9 | from collections import OrderedDict 10 | from enum import Enum 11 | 12 | import yaml 13 | 14 | from humanize import naturalsize 15 | from prettytable import PrettyTable, TableStyle 16 | 17 | 18 | class Fonts: 19 | BLUE = "\033[94m" 20 | GREEN = "\033[92m" 21 | YELLOW = "\033[93m" 22 | RED = "\033[91m" 23 | BOLD = "\033[1m" 24 | UNDERLINE = "\033[4m" 25 | END = "\033[0m" 26 | 27 | 28 | class OutputFormats(Enum): 29 | TEXT = "text" 30 | JSON = "json" 31 | CSV = "csv" 32 | YAML = "yaml" 33 | MARKDOWN = "md" 34 | 35 | def __str__(self): 36 | return self.value 37 | 38 | 39 | def terminal_support_colors(): 40 | try: 41 | _stdscr = curses.initscr() 42 | curses.start_color() 43 | if curses.has_colors(): 44 | _num_colors = curses.color_pair(1) 45 | return curses.COLORS > 0 46 | return False 47 | except Exception: # pylint: disable=broad-exception-caught 48 | return False 49 | finally: 50 | try: 51 | curses.endwin() 52 | except Exception: # pylint: disable=broad-exception-caught 53 | pass 54 | 55 | 56 | def terminal_support_utf_8(): 57 | return sys.stdout.encoding.lower() == "utf-8" 58 | 59 | 60 | NATURALSIZE_KEYS = [ 61 | "mem", 62 | "allocatedmem", 63 | "maxmem", 64 | "disk", 65 | "allocateddisk", 66 | "maxdisk", 67 | ] 68 | 69 | 70 | def render_output(table, columns=None, sortby=None, filters=None, output=OutputFormats.TEXT): 71 | if not columns: 72 | columns = [] 73 | if not filters: 74 | filters = [] 75 | 76 | if len(columns) == 0: 77 | columns = table[0].keys() 78 | else: 79 | table = [reorder_keys(n.__dict__ if hasattr(n, "__dict__") else n, columns) for n in table] 80 | 81 | x = prepare_prettytable(table, sortby, filters) 82 | 83 | if sortby is not None: 84 | sortby = "sortby" 85 | 86 | if output in (OutputFormats.TEXT, OutputFormats.MARKDOWN): 87 | if output == OutputFormats.MARKDOWN: 88 | x.set_style(TableStyle.MARKDOWN) 89 | return x.get_string(sortby=sortby, fields=columns) 90 | if output == OutputFormats.CSV: 91 | return x.get_csv_string(sortby=sortby, fields=columns) 92 | if output in (OutputFormats.JSON, OutputFormats.YAML): 93 | json_string = x.get_json_string(sortby=sortby, fields=columns) 94 | data = json.loads(json_string)[1:] 95 | if output == OutputFormats.JSON: 96 | return json.dumps(data) 97 | return yaml.dump(data) 98 | 99 | return None 100 | 101 | 102 | def prepare_prettytable(table, sortby, filters): 103 | do_sort = sortby is not None 104 | 105 | x = PrettyTable() 106 | x.align = "l" 107 | x.field_names = [*table[0].keys(), "sortby"] if do_sort else table[0].keys() 108 | 109 | for line in table: 110 | for key in line: 111 | if isinstance(line[key], Enum): 112 | line[key] = str(line[key]) 113 | # transform sets to lists as some output formats do not support them 114 | if isinstance(line[key], set): 115 | line[key] = list(line[key]) 116 | 117 | if do_sort: 118 | line["sortby"] = line[sortby] 119 | for key in NATURALSIZE_KEYS: 120 | if key in line: 121 | line[key] = naturalsize(line[key], binary=True) 122 | 123 | for filter_key, filter_value in filters: 124 | re_filter = re.compile(filter_value) 125 | table = [line for line in table if re_filter.search(str(line[filter_key]))] 126 | 127 | for line in table: 128 | 
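# rows go in only after the regex filters above have pruned the table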
x.add_row(line.values()) 129 | 130 | return x 131 | 132 | 133 | def print_output(table, columns=None, sortby=None, filters=None, output=OutputFormats.TEXT): 134 | print(render_output(table, columns, sortby, filters, output)) 135 | 136 | 137 | def reorder_keys(input_d, keys): 138 | # Reorder keys from input dict 139 | output = OrderedDict() 140 | input_keys = input_d.keys() 141 | output_keys = keys + [item for item in input_keys if item not in keys] 142 | for key in output_keys: 143 | output[key] = input_d[key] 144 | return output 145 | 146 | 147 | def print_taskstatus(task): 148 | columns = [ 149 | "upid", 150 | "exitstatus", 151 | "node", 152 | "runningstatus", 153 | "type", 154 | "user", 155 | "starttime", 156 | ] 157 | print_output([task], columns) 158 | 159 | 160 | def print_task(proxmox, upid, follow=False, wait=False): 161 | task = proxmox.find_task(upid) 162 | logging.debug("Task: %s", task) 163 | # Vanished tasks don't have any more information available in the API 164 | if task.vanished(): 165 | print_taskstatus(task) 166 | return 167 | 168 | log = task.log(limit=0) 169 | logging.debug("Task Log: %s", log) 170 | 171 | if task.running(): 172 | if follow: 173 | print_taskstatus(task) 174 | lastline = 0 175 | print("log output, follow mode") 176 | while task.running(): 177 | task.refresh() 178 | # logging.debug("Task status: %s", status) 179 | log = task.log(limit=0, start=lastline) 180 | logging.debug("Task Log: %s", log) 181 | for line in log: 182 | print(str(line["t"])) 183 | if line["n"] > lastline: 184 | lastline = line["n"] 185 | time.sleep(1) 186 | elif wait: 187 | print_taskstatus(task) 188 | while task.running(): 189 | task.refresh() 190 | print(".", end="") 191 | sys.stdout.flush() 192 | time.sleep(1) 193 | print("") 194 | elif not wait: 195 | print_output([{"log output": task.decode_log()}]) 196 | 197 | print_taskstatus(task) 198 | 199 | 200 | def defaulter(resource: dict, keys, default): 201 | for k in keys: 202 | if k not in resource.keys(): 203 | resource[k] = default 204 | return resource 205 | 206 | 207 | def _execute_command(cmd): 208 | return subprocess.run(cmd, shell=True, check=True, capture_output=True).stdout.rstrip() 209 | 210 | 211 | def run_auth_commands(clusterconfig): 212 | auth = {} 213 | regex = r"^\$\((.*)\)$" 214 | 215 | keys = ["user", "password", "token_name", "token_value"] 216 | 217 | if clusterconfig["proxy_certificate"] is not None: 218 | if isinstance(clusterconfig.get("proxy_certificate"), str): 219 | keys.append("proxy_certificate") 220 | else: 221 | auth["proxy_certificate"] = clusterconfig["proxy_certificate"] 222 | 223 | for key in keys: 224 | value = clusterconfig.get(key) 225 | if value is not None: 226 | result = re.match(regex, value) 227 | if result: 228 | value = _execute_command(result.group(1)) 229 | auth[key] = value 230 | 231 | if "proxy_certificate" in auth and isinstance(auth["proxy_certificate"], bytes): 232 | proxy_certificate = json.loads(auth["proxy_certificate"]) 233 | auth["proxy_certificate"] = { 234 | "cert": proxy_certificate.get("cert"), 235 | "key": proxy_certificate.get("key"), 236 | } 237 | 238 | if "proxy_certificate" in auth: 239 | auth["cert"] = (auth["proxy_certificate"]["cert"], auth["proxy_certificate"]["key"]) 240 | del auth["proxy_certificate"] 241 | 242 | logging.debug("Auth: %s", auth) 243 | # check for "incompatible" auth options 244 | if "password" in auth and ("token_name" in auth or "token_value" in auth): 245 | logging.error("Auth: cannot use both password and token options together.") 246 | 
sys.exit(1) 247 | if "token_name" in auth and "token_value" not in auth: 248 | logging.error("Auth: token-name requires token-value option.") 249 | sys.exit(1) 250 | 251 | return auth 252 | -------------------------------------------------------------------------------- /src/tests/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ["NO_COLOR"] = "1" 4 | -------------------------------------------------------------------------------- /src/tests/fixtures/api.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | import responses 4 | 5 | 6 | def execute_route(routes, method, url, **kwargs): 7 | print(f"{method} {url}") 8 | print(f"params: {kwargs['params']}") 9 | path = url.replace("https://host:8006", "") 10 | assert path in routes 11 | 12 | route = routes[path] 13 | data = route(method, **kwargs) if callable(route) else route 14 | 15 | content = json.dumps({"data": data}) 16 | print(content + "\n") 17 | 18 | return content 19 | 20 | 21 | DEFAULT_VM_CONFIG = { 22 | "memory": "1024", 23 | "vmgenid": "00000000-0000-0000-0000-000000000000", 24 | "template": 1, 25 | "scsihw": "virtio-scsi-single", 26 | "serial0": "socket", 27 | "balloon": 0, 28 | "onboot": 1, 29 | "ide2": "local:9012/vm-9012-cloudinit.qcow2,media=cdrom", 30 | "agent": "1", 31 | "cores": 1, 32 | "numa": 1, 33 | "digest": "0000000000000000000000000000000000000000", 34 | "smbios1": "uuid=00000000-0000-0000-0000-000000000000", 35 | "boot": "order=scsi0;net0", 36 | "ostype": "l26", 37 | "sockets": 1, 38 | "machine": "q35", 39 | "net0": "virtio=00:00:00:00:00:00,bridge=vmbr0", 40 | "cpu": "x86-64-v2-AES", 41 | "rng0": "source=/dev/urandom", 42 | "scsi0": "local:9012/base-9012-disk-0.qcow2,size=2G,ssd=1", 43 | "name": "template.debian-12-bookworm-amd64", 44 | } 45 | 46 | 47 | def mock_api_requests(nodes, vms, backup_jobs=None, storage_resources=None, storage_contents=None): 48 | routes = generate_routes(nodes, vms, backup_jobs, storage_resources, storage_contents) 49 | 50 | def side_effect(method, url, **kwargs): 51 | content = execute_route(routes, method, url, **kwargs) 52 | 53 | res = requests.Response() 54 | res.status_code = 200 55 | res._content = content.encode("utf-8") # pylint: disable=protected-access 56 | return res 57 | 58 | return side_effect 59 | 60 | 61 | def create_response_wrapper(nodes, vms, backup_jobs=None, storage_resources=None, storage_contents=None): 62 | routes = generate_routes(nodes, vms, backup_jobs, storage_resources, storage_contents) 63 | 64 | def wrapper(path, data=None, **kwargs): 65 | kwargs["params"] = kwargs.get("params", {}) 66 | url = "https://host:8006" + path 67 | 68 | if data is None: 69 | body = execute_route(routes, "GET", url, **kwargs) 70 | else: 71 | body = json.dumps({"data": data}) 72 | 73 | responses.get(url, body=body) 74 | 75 | return wrapper 76 | 77 | 78 | def generate_routes(nodes, vms, backup_jobs, storage_resources=None, storage_contents=None): 79 | storage_resources = storage_resources or [] 80 | routes = { 81 | "/api2/json/cluster/status": get_status(nodes), 82 | "/api2/json/cluster/resources": get_resources(nodes, vms, storage_resources), 83 | "/api2/json/nodes": get_node_resources(nodes), 84 | "/api2/json/cluster/tasks": [], 85 | "/api2/json/cluster/ha/groups": [], 86 | "/api2/json/cluster/ha/status/manager_status": [], 87 | "/api2/json/cluster/ha/resources": [], 88 | "/api2/json/cluster/backup": backup_jobs, 89 | 
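# per-node VM and storage-content routes are generated dynamically below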
**generate_vm_routes(nodes, vms), 90 | **generate_storages_contents_routes(nodes, storage_resources, storage_contents), 91 | } 92 | 93 | print("ROUTES:") 94 | for route_path in routes.keys(): 95 | print(route_path) 96 | print("") 97 | 98 | return routes 99 | 100 | 101 | def get_status(nodes): 102 | return [ 103 | {"type": "cluster", "version": 2, "quorate": 1, "nodes": len(nodes), "id": "cluster", "name": "devel"}, 104 | *[n["status"] for n in nodes], 105 | ] 106 | 107 | 108 | def get_resources(nodes, vms, storage_resources): 109 | return [ 110 | *[n["resource"] for n in nodes], 111 | *storage_resources, 112 | *vms, 113 | ] 114 | 115 | 116 | def get_node_resources(nodes): 117 | return [n["resource"] for n in nodes] 118 | 119 | 120 | def get_node_qemu_for_vm(vm): 121 | return { 122 | "name": vm["name"], 123 | "maxmem": vm["maxmem"], 124 | "uptime": vm["uptime"], 125 | "vmid": vm["vmid"], 126 | "mem": vm["mem"], 127 | "disk": vm["disk"], 128 | "cpu": vm["cpu"], 129 | "maxdisk": vm["maxdisk"], 130 | "diskread": vm["diskread"], 131 | "netout": vm["netout"], 132 | "netin": vm["netin"], 133 | "diskwrite": vm["diskwrite"], 134 | "status": vm["status"], 135 | "serial": 1, 136 | "pid": 454971, 137 | "cpus": 1, 138 | } 139 | 140 | 141 | def generate_vm_routes(nodes, vms): 142 | routes = {} 143 | 144 | for node in nodes: 145 | name = node["status"]["name"] 146 | routes[f"/api2/json/nodes/{name}/qemu"] = [] 147 | 148 | for vm in vms: 149 | node_name = vm["node"] 150 | vm_id = vm["vmid"] 151 | routes[f"/api2/json/nodes/{node_name}/qemu/{vm_id}/config"] = generate_vm_config_route(vm) 152 | routes[f"/api2/json/nodes/{node_name}/qemu"].append(get_node_qemu_for_vm(vm)) 153 | 154 | return routes 155 | 156 | 157 | def generate_vm_config_route(vm): 158 | if "config" in vm.keys() and vm["config"] is not None: 159 | return vm["config"] 160 | 161 | return DEFAULT_VM_CONFIG 162 | 163 | 164 | def generate_storage_content_route(storage, storages_contents): 165 | def storage_content_route(_method, params=None, **_kwargs): 166 | items = [] 167 | for item in storages_contents: 168 | storage_filter = item["volid"].split(":")[0] == storage["storage"] 169 | # we use the `in` operator to check whether the item content is a substring of the params content 170 | # e.g. "image" in "images" 171 | content_filter = "content" not in params or item["content"] in params["content"] 172 | if storage_filter and content_filter: 173 | items.append(item) 174 | return items 175 | 176 | return storage_content_route 177 | 178 | 179 | def generate_storages_contents_routes(nodes, storage_resources, storages_contents): 180 | routes = {} 181 | 182 | for node in nodes: 183 | node_name = node["status"]["name"] 184 | for storage in storage_resources: 185 | 186 | storage_name = storage["storage"] 187 | route = generate_storage_content_route(storage, storages_contents[node_name][storage_name]) 188 | routes[f"/api2/json/nodes/{node_name}/storage/{storage_name}/content"] = route 189 | return routes 190 | 191 | 192 | def fake_node(node_id, local=False): 193 | resource_id = f"node/pve-devel-{node_id}" 194 | name = f"pve-devel-{node_id}" 195 | return { 196 | "status": { 197 | "id": resource_id, 198 | "nodeid": node_id, 199 | "name": name, 200 | "ip": f"10.42.24.{node_id}", 201 | "local": 1 if local else 0, 202 | "type": "node", 203 | "online": 1, 204 | "level": "", 205 | }, 206 | "resource": { 207 | "id": resource_id, 208 | "node": name, 209 | "maxmem": 202758361088, 210 | "disk": 20973391872, 211 | "mem": 4133466112, 212 | "uptime": 1987073, 213 | "maxdisk": 
33601372160, 214 | "cpu": 0.00572599602193961, 215 | "type": "node", 216 | "status": "online", 217 | "level": "", 218 | "maxcpu": 32, 219 | # only in /api2/json/cluster/resources 220 | "cgroup-mode": 2, 221 | "hastate": "online", 222 | # only in /api2/json/nodes 223 | "ssl_fingerprint": ":".join(["00"] * 32), 224 | }, 225 | } 226 | 227 | 228 | def fake_vm(vm_id, node, status="running", config=None): 229 | return { 230 | "id": f"qemu/{vm_id}", 231 | "vmid": vm_id, 232 | "name": f"vm-{vm_id}", 233 | "node": node["status"]["name"], 234 | "status": status, 235 | "diskread": 0, 236 | "mem": 292823173, 237 | "disk": 0, 238 | "maxmem": 1073741824, 239 | "maxdisk": 2147483648, 240 | "uptime": 869492, 241 | "diskwrite": 2405421056, 242 | "netout": 4896058, 243 | "cpu": 0.00581464923852424, 244 | "netin": 7215771, 245 | "template": 0, 246 | "hastate": "started", 247 | "maxcpu": 1, 248 | "type": "qemu", 249 | "config": config, 250 | } 251 | 252 | 253 | def fake_backup_job(job_id, vmid): 254 | return { 255 | "id": f"backup-d71917f0-{job_id:04x}", 256 | "prune-backups": {"keep-last": "3"}, 257 | "storage": "local", 258 | "notes-template": "{{guestname}}", 259 | "schedule": "sun 01:00", 260 | "fleecing": {"enabled": "0"}, 261 | "enabled": 1, 262 | "type": "vzdump", 263 | "next-run": 1735430400, 264 | "mode": "snapshot", 265 | "vmid": vmid, 266 | "compress": "zstd", 267 | } 268 | 269 | 270 | def fake_storage_resource(name, node_name, shared=1, plugin_type="s3"): 271 | return { 272 | "content": "snippets,images,iso,backup,rootdir,vztmpl", 273 | "id": f"storage/{node_name}/{name}", 274 | "disk": 0, 275 | "storage": name, 276 | "shared": shared, 277 | "status": "available", 278 | "maxdisk": 33601372160, 279 | "type": "storage", 280 | "node": node_name, 281 | "plugintype": plugin_type, 282 | } 283 | 284 | 285 | def fake_storage_content(storage, volid, vmid, content, ctime, storage_format, options): 286 | return { 287 | "volid": f"{storage}:{vmid}/{volid}", 288 | "content": content, 289 | "vmid": vmid, 290 | "ctime": ctime, 291 | "format": storage_format, 292 | "size": 1124800, 293 | **options, 294 | } 295 | 296 | 297 | def fake_backup(storage, vmid, created_at): 298 | created_at_str = created_at.strftime("%Y_%m_%d-%H_%M_%S") 299 | volid = f"vz-dump-qemu-{vmid}-{created_at_str}.vma.zst" 300 | options = { 301 | "vmid": vmid, 302 | "notes": f"VM {vmid}", 303 | "subtype": "qemu", 304 | } 305 | return fake_storage_content(storage, volid, "backup", "backup", int(created_at.timestamp()), "vma.zst", options) 306 | -------------------------------------------------------------------------------- /src/tests/sanitycheck/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/enix/pvecontrol/e86f30d4bc0c30125ef95e37e729f7a1d3bb2bf0/src/tests/sanitycheck/__init__.py -------------------------------------------------------------------------------- /src/tests/sanitycheck/test_vm_backups.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import patch 2 | 3 | import responses 4 | 5 | from pvecontrol.sanitycheck.tests.vm_backups import VmBackups 6 | from pvecontrol.sanitycheck import SanityCheck 7 | from pvecontrol.sanitycheck.checks import CheckCode 8 | from tests.sanitycheck.utils import assert_message 9 | from tests.testcase import PVEControlTestcase 10 | 11 | 12 | class PVEClusterTestcase(PVEControlTestcase): 13 | 14 | @responses.activate 15 | def test_check(self): 16 | 
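# setUp() (see testcase.py) has already registered /cluster/status and
# /cluster/resources; only the extra endpoints used by this check are added here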
self.responses_get("/api2/json/cluster/backup") 17 | self.responses_get("/api2/json/nodes/pve-devel-1/storage/s3/content", params={"content": "backup"}) 18 | 19 | vm_backups_check = VmBackups(self.cluster) 20 | vm_backups_check.run() 21 | 22 | sc = SanityCheck(self.cluster) 23 | with patch.object(sc, "_checks", new=[vm_backups_check]): 24 | exitcode = sc.get_exit_code() 25 | sc.display() 26 | 27 | assert exitcode == 1 28 | assert len(vm_backups_check.messages) == 8 29 | 30 | # check for associated backup jobs 31 | assert_message(vm_backups_check.messages[0], CheckCode.OK, "vm-100", "is associated") 32 | assert_message(vm_backups_check.messages[1], CheckCode.OK, "vm-101", "is associated") 33 | assert_message(vm_backups_check.messages[2], CheckCode.OK, "vm-102", "is associated") 34 | assert_message(vm_backups_check.messages[3], CheckCode.WARN, "vm-103", "not associated") 35 | assert_message(vm_backups_check.messages[4], CheckCode.WARN, "vm-104", "not associated") 36 | 37 | # check for recent backups 38 | assert_message(vm_backups_check.messages[5], CheckCode.CRIT, "vm-100", "is too old") 39 | assert_message(vm_backups_check.messages[6], CheckCode.OK, "vm-101", "is recent enough") 40 | assert_message(vm_backups_check.messages[7], CheckCode.WARN, "vm-102", "never") 41 | -------------------------------------------------------------------------------- /src/tests/sanitycheck/test_vm_disks.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import patch 2 | from pvecontrol.models.cluster import PVECluster 3 | from pvecontrol.sanitycheck.tests.vm import DiskUnused 4 | from pvecontrol.sanitycheck import SanityCheck 5 | from pvecontrol.sanitycheck.checks import CheckCode 6 | from tests.sanitycheck.utils import assert_message 7 | from tests.fixtures.api import ( 8 | mock_api_requests, 9 | fake_node, 10 | fake_vm, 11 | fake_storage_resource, 12 | fake_storage_content, 13 | DEFAULT_VM_CONFIG, 14 | ) 15 | 16 | 17 | @patch("proxmoxer.backends.https.ProxmoxHTTPAuth") 18 | @patch("proxmoxer.backends.https.ProxmoxHttpSession.request") 19 | def test_sanitycheck_vm_disk_unused(request, _proxmox_http_auth): 20 | # VMs have unused disks 21 | nodes = [ 22 | fake_node(3, True), 23 | fake_node(4, True), 24 | ] 25 | vms = [ 26 | fake_vm(100, nodes[0], config={**DEFAULT_VM_CONFIG, "unused1": "vm-100-disk-1.qcow2"}), 27 | fake_vm(101, nodes[0], config={**DEFAULT_VM_CONFIG, "unused1": "local:101/vm-101-disk-1.qcow2"}), 28 | fake_vm(102, nodes[1]), 29 | fake_vm(103, nodes[1]), 30 | ] 31 | 32 | request.side_effect = mock_api_requests(nodes, vms) 33 | 34 | proxmox = PVECluster( 35 | "name", 36 | "host", 37 | config={}, 38 | **{"user": "user", "password": "password"}, 39 | timeout=1, 40 | ) 41 | 42 | disk_unused_check = DiskUnused(proxmox) 43 | disk_unused_check.run() 44 | 45 | sc = SanityCheck(proxmox) 46 | with patch.object(sc, "_checks", new=[disk_unused_check]): 47 | exitcode = sc.get_exit_code() 48 | sc.display() 49 | 50 | assert exitcode == 1 51 | assert len(disk_unused_check.messages) == 2 52 | assert_message(disk_unused_check.messages[0], CheckCode.CRIT, "Disk 'unused1' is not used on vm 100/vm-100") 53 | assert_message(disk_unused_check.messages[1], CheckCode.CRIT, "Disk 'unused1' is not used on vm 101/vm-101") 54 | 55 | 56 | @patch("proxmoxer.backends.https.ProxmoxHTTPAuth") 57 | @patch("proxmoxer.backends.https.ProxmoxHttpSession.request") 58 | def test_sanitycheck_local_storage_vm_deleted(request, _proxmox_http_auth): 59 | # Local storage has unused disks; the 
VM no longer exists 60 | nodes = [ 61 | fake_node(3, True), 62 | fake_node(4, True), 63 | ] 64 | vms = [fake_vm(100, nodes[0])] 65 | 66 | storage_resources = [ 67 | fake_storage_resource("local", nodes[0]["status"]["name"], shared=0, plugin_type="lvm"), 68 | fake_storage_resource("local", nodes[1]["status"]["name"], shared=0, plugin_type="lvm"), 69 | ] 70 | 71 | storage_contents = { 72 | nodes[0]["status"]["name"]: { 73 | "local": [fake_storage_content("local", "vm-100-disk-1.qcow2", 100, "image", 1738461900, "qcow2", {})], 74 | }, 75 | nodes[1]["status"]["name"]: { 76 | "local": [fake_storage_content("local", "vm-101-disk-1.qcow2", 101, "image", 1738461900, "qcow2", {})], 77 | }, 78 | } 79 | 80 | request.side_effect = mock_api_requests( 81 | nodes, vms, storage_resources=storage_resources, storage_contents=storage_contents 82 | ) 83 | 84 | proxmox = PVECluster( 85 | "name", 86 | "host", 87 | config={}, 88 | **{"user": "user", "password": "password"}, 89 | timeout=1, 90 | ) 91 | 92 | disk_unused_check = DiskUnused(proxmox) 93 | disk_unused_check.run() 94 | 95 | sc = SanityCheck(proxmox) 96 | 97 | with patch.object(sc, "_checks", new=[disk_unused_check]): 98 | exitcode = sc.get_exit_code() 99 | sc.display() 100 | 101 | assert exitcode == 1 102 | assert len(disk_unused_check.messages) == 3 103 | 104 | assert_message(disk_unused_check.messages[0], CheckCode.OK, "Storage 'pve-devel-3/local' has 1/1 disk used") 105 | assert_message(disk_unused_check.messages[1], CheckCode.WARN, "Storage 'pve-devel-4/local' has 0/1 disk used") 106 | assert_message( 107 | disk_unused_check.messages[2], 108 | CheckCode.CRIT, 109 | "Disk 'pve-devel-4/local:101/vm-101-disk-1.qcow2' is not used, vm 101 doesn't exist on node", 110 | ) 111 | 112 | 113 | @patch("proxmoxer.backends.https.ProxmoxHTTPAuth") 114 | @patch("proxmoxer.backends.https.ProxmoxHttpSession.request") 115 | def test_sanitycheck_shared_storage_vm_deleted(request, _proxmox_http_auth): 116 | # Shared storage has unused disks; the VM no longer exists 117 | nodes = [ 118 | fake_node(3, True), 119 | fake_node(4, True), 120 | ] 121 | vms = [fake_vm(100, nodes[0])] 122 | 123 | storage_resources = [ 124 | fake_storage_resource("shared", nodes[0]["status"]["name"], shared=1, plugin_type="lvm"), 125 | fake_storage_resource("shared", nodes[1]["status"]["name"], shared=1, plugin_type="lvm"), 126 | ] 127 | 128 | contents = [ 129 | fake_storage_content("shared", "vm-100-disk-1.qcow2", 100, "image", 1738461900, "qcow2", {}), 130 | fake_storage_content("shared", "vm-101-disk-1.qcow2", 101, "image", 1738461900, "qcow2", {}), 131 | ] 132 | 133 | storage_contents = { 134 | nodes[0]["status"]["name"]: { 135 | "shared": contents, 136 | }, 137 | nodes[1]["status"]["name"]: { 138 | "shared": contents, 139 | }, 140 | } 141 | 142 | request.side_effect = mock_api_requests( 143 | nodes, vms, storage_resources=storage_resources, storage_contents=storage_contents 144 | ) 145 | 146 | proxmox = PVECluster( 147 | "name", 148 | "host", 149 | config={}, 150 | **{"user": "user", "password": "password"}, 151 | timeout=1, 152 | ) 153 | 154 | disk_unused_check = DiskUnused(proxmox) 155 | disk_unused_check.run() 156 | 157 | sc = SanityCheck(proxmox) 158 | 159 | with patch.object(sc, "_checks", new=[disk_unused_check]): 160 | exitcode = sc.get_exit_code() 161 | sc.display() 162 | 163 | assert exitcode == 1 164 | assert len(disk_unused_check.messages) == 2 165 | 166 | assert_message( 167 | disk_unused_check.messages[0], 168 | CheckCode.CRIT, 169 | "Disk 
'pve-devel-3/shared:101/vm-101-disk-1.qcow2' is not used, vm 101 doesn't exist", 170 | ) 171 | assert_message( 172 | disk_unused_check.messages[1], 173 | CheckCode.CRIT, 174 | "Disk 'pve-devel-4/shared:101/vm-101-disk-1.qcow2' is not used, vm 101 doesn't exist", 175 | ) 176 | -------------------------------------------------------------------------------- /src/tests/sanitycheck/utils.py: -------------------------------------------------------------------------------- 1 | def assert_message(message, expected_code, *message_contains): 2 | assert message.code == expected_code 3 | for string in message_contains: 4 | assert string in message.message 5 | -------------------------------------------------------------------------------- /src/tests/test_backup_job.py: -------------------------------------------------------------------------------- 1 | from pvecontrol.models.backup_job import PVEBackupJob 2 | from pvecontrol.models.vm import PVEVm 3 | 4 | 5 | def test_is_selection_matching(): 6 | vms = [ 7 | PVEVm(None, "node-0", 0, "running", {"pool": "pool-A"}), 8 | PVEVm(None, "node-1", 1, "running", {"pool": "pool-A"}), 9 | PVEVm(None, "node-0", 2, "running", {"pool": "pool-B"}), 10 | PVEVm(None, "node-1", 3, "running", {"pool": "pool-B"}), 11 | ] 12 | 13 | def check_is_selection_matching_array(truth_table, backup_job): 14 | for i, is_scheduled in enumerate(truth_table): 15 | assert backup_job.is_selection_matching(vms[i]) == bool(is_scheduled) 16 | 17 | check_is_selection_matching_array([1, 0, 0, 0], PVEBackupJob(0, vmid="0")) 18 | check_is_selection_matching_array([0, 0, 1, 1], PVEBackupJob(0, vmid="2,3")) 19 | check_is_selection_matching_array([1, 1, 1, 1], PVEBackupJob(0, all=1)) 20 | check_is_selection_matching_array([1, 0, 0, 1], PVEBackupJob(0, all=1, exclude="1,2")) 21 | check_is_selection_matching_array([1, 0, 1, 0], PVEBackupJob(0, all=1, node="node-0")) 22 | check_is_selection_matching_array([1, 0, 0, 0], PVEBackupJob(0, all=1, node="node-0", exclude="2")) 23 | check_is_selection_matching_array([1, 1, 0, 0], PVEBackupJob(0, pool="pool-A")) 24 | check_is_selection_matching_array([0, 0, 1, 0], PVEBackupJob(0, pool="pool-B", node="node-0")) 25 | -------------------------------------------------------------------------------- /src/tests/test_cluster.py: -------------------------------------------------------------------------------- 1 | from tests.testcase import PVEControlTestcase 2 | 3 | 4 | class PVEClusterTestcase(PVEControlTestcase): 5 | 6 | def test_find_node(self): 7 | assert len(self.cluster.nodes) == len(self.nodes) 8 | assert len(self.cluster.vms) == len(self.vms) 9 | assert len(self.cluster.nodes[0].vms) == 2 10 | assert len(self.cluster.nodes[1].vms) == 3 11 | 12 | for n in self.nodes: 13 | node_object = self.cluster.find_node(n["status"]["name"]) 14 | assert node_object.node == n["status"]["name"] 15 | 16 | def test_find_nodes(self): 17 | node_objects = self.cluster.find_nodes("*devel-1") 18 | assert len(node_objects) == 1 19 | assert node_objects[0].node == "pve-devel-1" 20 | 21 | node_objects = self.cluster.find_nodes("pve-devel-*") 22 | assert len(node_objects) == len(self.nodes) 23 | 24 | node_objects = self.cluster.find_nodes("*pve-devel-[13]") 25 | assert len(node_objects) == 2 26 | assert node_objects[0].node == "pve-devel-1" 27 | assert node_objects[1].node == "pve-devel-3" 28 | 29 | node_objects = self.cluster.find_nodes("*prod*") 30 | assert len(node_objects) == 0 31 | --------------------------------------------------------------------------------
/src/tests/test_pvecontrol.py: -------------------------------------------------------------------------------- 1 | from pvecontrol import pvecontrol, get_leaf_command 2 | from pvecontrol.utils import reorder_keys 3 | from pvecontrol.actions.node import root as node, evacuate 4 | from pvecontrol.actions.vm import migrate 5 | 6 | 7 | def test_reorder_keys(): 8 | input_d = {"a": 1, "b": 2, "c": 3, "d": 4} 9 | keys = ["c", "a"] 10 | assert reorder_keys(input_d, keys) == {"c": 3, "a": 1, "b": 2, "d": 4} 11 | 12 | 13 | def test_get_leaf_command(): 14 | testcases = [ 15 | (pvecontrol, [], []), 16 | (pvecontrol, ["--debug"], ["--debug"]), 17 | (node, ["node"], []), 18 | (evacuate, ["node", "evacuate"], []), 19 | (evacuate, ["-o", "json", "node", "evacuate", "--help"], ["--help"]), 20 | (migrate, ["vm", "migrate", "id", "target"], ["id", "target"]), 21 | (None, ["foobar"], []), 22 | ] 23 | 24 | for testcase in testcases: 25 | ctx = pvecontrol.make_context(pvecontrol.name, list(testcase[1]), resilient_parsing=True) 26 | leaf_cmd, leaf_args = get_leaf_command(pvecontrol, ctx, testcase[1]) 27 | assert leaf_cmd == testcase[0] 28 | assert leaf_args == testcase[2] 29 | -------------------------------------------------------------------------------- /src/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import csv 3 | 4 | from io import StringIO 5 | from unittest.mock import Mock 6 | 7 | import yaml 8 | 9 | from pvecontrol.models.vm import PVEVm, COLUMNS 10 | from pvecontrol.utils import render_output, OutputFormats 11 | 12 | 13 | def test_render_output(): 14 | api = Mock() 15 | vms = [ 16 | PVEVm(api, "pve-node-1", 100, "running"), 17 | PVEVm(api, "pve-node-1", 101, "running"), 18 | PVEVm(api, "pve-node-2", 102, "stopped"), 19 | ] 20 | 21 | output_text = render_output(vms, columns=COLUMNS, output=OutputFormats.TEXT) 22 | output_json = render_output(vms, columns=COLUMNS, output=OutputFormats.JSON) 23 | output_csv = render_output(vms, columns=COLUMNS, output=OutputFormats.CSV) 24 | output_yaml = render_output(vms, columns=COLUMNS, output=OutputFormats.YAML) 25 | output_md = render_output(vms, columns=COLUMNS, output=OutputFormats.MARKDOWN) 26 | 27 | assert output_text.split("\n")[0].replace("+", "").replace("-", "") == "" 28 | assert len(json.loads(output_json)) == 3 29 | assert len(list(csv.DictReader(StringIO(output_csv)))) == 3 30 | assert len(yaml.safe_load(output_yaml)) == 3 31 | assert len(output_md.split("\n")) == 5 32 | -------------------------------------------------------------------------------- /src/tests/testcase.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from unittest.mock import patch 4 | from datetime import datetime, timedelta 5 | 6 | import responses 7 | 8 | from tests.fixtures.api import ( 9 | create_response_wrapper, 10 | fake_node, 11 | fake_vm, 12 | fake_backup, 13 | fake_backup_job, 14 | fake_storage_resource, 15 | ) 16 | from pvecontrol.models.cluster import PVECluster 17 | 18 | 19 | class PVEControlTestcase(unittest.TestCase): 20 | 21 | @responses.activate 22 | def setUp(self): 23 | nodes = [ 24 | fake_node(1, True), 25 | fake_node(2, True), 26 | fake_node(3, True), 27 | ] 28 | self.nodes = nodes 29 | self.vms = [ 30 | fake_vm(100, nodes[0]), 31 | fake_vm(101, nodes[0]), 32 | fake_vm(102, nodes[1]), 33 | fake_vm(103, nodes[1]), 34 | fake_vm(104, nodes[1]), 35 | ] 36 | self.backup_jobs = [ 37 | fake_backup_job(1, "100"), 38 | 
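# vms 103 and 104 deliberately get no backup job, exercising the WARN path of vm_backups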
fake_backup_job(2, "101"), 39 | fake_backup_job(3, "102"), 40 | ] 41 | self.storage_resources = [fake_storage_resource("s3", n["status"]["name"]) for n in nodes] 42 | self.backups = [ 43 | fake_backup("s3", 100, datetime.now() - timedelta(minutes=110)), 44 | fake_backup("s3", 101, datetime.now() - timedelta(minutes=90)), 45 | ] 46 | self.storages_contents = {node["status"]["name"]: {"s3": self.backups} for node in self.nodes} 47 | 48 | self.responses_get = create_response_wrapper( 49 | self.nodes, self.vms, self.backup_jobs, self.storage_resources, self.storages_contents 50 | ) 51 | 52 | self.responses_get("/api2/json/cluster/status") 53 | self.responses_get("/api2/json/cluster/resources") 54 | 55 | with patch("proxmoxer.backends.https.ProxmoxHTTPAuth") as mock_auth: 56 | mock_auth_instance = mock_auth.return_value 57 | mock_auth_instance.timeout = 1 58 | 59 | self.cluster = PVECluster( 60 | "name", 61 | "host", 62 | config={ 63 | "node": { 64 | "cpufactor": 2.5, 65 | "memoryminimum": 81928589934592, 66 | }, 67 | "vm": { 68 | "max_last_backup": 100, 69 | }, 70 | }, 71 | verify_ssl=False, 72 | **{"user": "user", "password": "password"}, 73 | timeout=mock_auth_instance.timeout, 74 | ) 75 | --------------------------------------------------------------------------------
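A note on how these pieces fit together: every sanity check above follows the same small contract, a Check subclass with id, type and name attributes plus a run() method that records CheckMessage objects through add_messages(), and DEFAULT_CHECKS in sanitycheck/tests/__init__.py is a plain id-to-class registry. A minimal sketch of a custom check built on that contract (the check id, its name and the "serial0" heuristic are hypothetical, invented for illustration):

from pvecontrol.sanitycheck.checks import Check, CheckCode, CheckType, CheckMessage


class TemplatesHaveSerialConsole(Check):
    # hypothetical check: flag template VMs that do not expose a serial
    # console ("serial0" in their config, as in DEFAULT_VM_CONFIG above)
    id = "templates_have_serial_console"
    type = CheckType.VM
    name = "Check that templates expose a serial console"

    def run(self):
        templates = [vm for vm in self.proxmox.vms if vm.template == 1]
        for vm in templates:
            if "serial0" in vm.config:
                msg = f"Template {vm.vmid}/{vm.name} exposes a serial console"
                self.add_messages(CheckMessage(CheckCode.OK, msg))
            else:
                msg = f"Template {vm.vmid}/{vm.name} has no serial console"
                self.add_messages(CheckMessage(CheckCode.WARN, msg))

Registered next to the defaults, e.g. DEFAULT_CHECKS[TemplatesHaveSerialConsole.id] = TemplatesHaveSerialConsole, it should be picked up alongside the built-in checks and feed into SanityCheck.display() and get_exit_code() like any other.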