├── downburst
│   ├── test
│   │   ├── __init__.py
│   │   ├── test_discover.py
│   │   ├── test_dehumanize.py
│   │   ├── test_template.py
│   │   └── test_util.py
│   ├── __init__.py
│   ├── exc.py
│   ├── util.py
│   ├── template.xml
│   ├── dehumanize.py
│   ├── wait.py
│   ├── gen_ssh_key.py
│   ├── meta.py
│   ├── iso.py
│   ├── cli.py
│   ├── destroy.py
│   ├── image.py
│   ├── template.py
│   ├── create.py
│   └── discover.py
├── MANIFEST.in
├── requirements-dev.txt
├── setup.cfg
├── doc
│   └── examples
│       ├── fixed-password.user.yaml
│       ├── static-ip.user.yaml
│       ├── debug-helper.user.yaml
│       ├── no-password.user.yaml
│       ├── custom-networks.meta.yaml
│       ├── rbd-meta.yaml
│       ├── no-console-blank.user.yaml
│       ├── eject-cdrom.user.yaml
│       ├── static-ip.meta.yaml
│       └── distro-meta.yaml
├── tox.ini
├── .gitignore
├── bootstrap
├── .github
│   └── workflows
│       └── ci.yml
├── LICENSE
├── setup.py
└── README.rst
/downburst/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.rst
2 | include requirements-dev.txt
3 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | distro
2 | lxml >= 2.3.2
3 | requests
4 | PyYAML >= 3.10
5 | libvirt-python >= 4.0.0
6 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | norecursedirs = .* _* virtualenv
3 | [metadata]
4 | description-file = README.rst
5 |
--------------------------------------------------------------------------------
/doc/examples/fixed-password.user.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config-archive
2 | - type: text/cloud-config
3 | content: |
4 | password: foo
5 | chpasswd:
6 | expire: false
7 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py3
3 |
4 | [testenv]
5 | deps=
6 | -r{toxinidir}/requirements-dev.txt
7 | requests
8 | lxml
9 | pytest
10 | commands=py.test {posargs:downburst}
11 |
--------------------------------------------------------------------------------
/doc/examples/static-ip.user.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config-archive
2 |
3 | # kludge around meta-data ``network-interfaces`` inability to change
4 | # ip address from what dhcp gave it
5 | - |
6 | #!/bin/sh
7 | ifdown eth0
8 | ifup eth0
9 |
--------------------------------------------------------------------------------
/downburst/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = '0.0.1'
2 |
3 | import logging
4 |
5 | # We don't need to see log entries for each connection opened
6 | logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
7 | logging.WARN)
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | .#*
3 | ## the next line needs to start with a backslash to avoid looking like
4 | ## a comment
5 | \#*#
6 | .*.swp
7 |
8 | *.pyc
9 | *.pyo
10 | *.egg-info
11 | /build
12 | /dist
13 |
14 | /virtualenv
15 | /.tox
16 |
--------------------------------------------------------------------------------
/doc/examples/debug-helper.user.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config-archive
2 |
3 | # this captures output from the cloud-init runnables in a log file
4 | - type: text/cloud-config
5 | content: |
6 | output:
7 | all: '| tee -a /var/log/cloud-init-output.log'
8 |
--------------------------------------------------------------------------------
/doc/examples/no-password.user.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config-archive
2 |
3 | # passwordless ubuntu is nice; this is only for the console, ssh by
4 | # default won't allow anyone in without authorized_keys, which makes
5 | # this still safe
6 | - |
7 | #!/bin/sh
8 | exec passwd -d ubuntu
9 |
--------------------------------------------------------------------------------
/doc/examples/custom-networks.meta.yaml:
--------------------------------------------------------------------------------
1 | # You can explicitly choose what libvirt networks to connect the vm
2 | # to. Default is one interface in the ``default`` network, with a
3 | # randomly assigned MAC.
4 | downburst:
5 | networks:
6 | - source: green
7 | mac: 52:54:00:42:42:42
8 | - source: yellow
9 |
--------------------------------------------------------------------------------
/doc/examples/rbd-meta.yaml:
--------------------------------------------------------------------------------
1 | downburst:
2 | distro: "ubuntu"
3 | distroversion: "12.04"
4 | ceph-cluster-name: ceph
5 | ceph-cluster-monitors: 192.168.1.2:6789,192.168.1.3:6789
6 | ceph-cluster-pool: rbd
7 | rbd-disks: 1
8 | rbd-disks-size: 20G
9 | disk-size: 40G
10 | ram: 4G
11 | cpus: 2
12 | networks:
13 | - source: front
14 |
--------------------------------------------------------------------------------
/doc/examples/no-console-blank.user.yaml:
--------------------------------------------------------------------------------
1 | #upstart-job
2 | description "Prevent console blanking"
3 |
4 | start on (starting tty1
5 | or starting tty2
6 | or starting tty3
7 | or starting tty4
8 | or starting tty5
9 | or starting tty6)
10 |
11 | task
12 |
13 | instance $JOB
14 |
15 | exec setterm -blank 0 -powersave off <"/dev/$JOB" >"/dev/$JOB"
16 |
--------------------------------------------------------------------------------
/doc/examples/eject-cdrom.user.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config-archive
2 |
3 | # eject the cdrom (containing the cloud-init metadata)
4 | # as a signal that we've reached full functionality;
5 | # you can poll this via libvirt with
6 | #
7 | # virsh qemu-monitor-command DOMAIN --cmd '{"execute": "query-block"}'
8 | #
9 | - |
10 | #!/bin/sh
11 | exec eject /dev/cdrom
12 |
--------------------------------------------------------------------------------
/doc/examples/static-ip.meta.yaml:
--------------------------------------------------------------------------------
1 | # TODO it's already ifup'ed, this changes the file but not the running
2 | # config! you need to use the corresponding static-ip.user.yaml
3 |
4 | # the IP address here is chosen to be inside the virbr0 network
5 | # libvirt creates by default
6 | network-interfaces: |
7 | auto eth0
8 | iface eth0 inet static
9 | address 192.168.122.200
10 | network 192.168.122.0
11 | netmask 255.255.255.0
12 | broadcast 192.168.122.255
13 | gateway 192.168.122.1
14 |
--------------------------------------------------------------------------------
/doc/examples/distro-meta.yaml:
--------------------------------------------------------------------------------
1 | # Set the distro and distro version in the yaml file like so. Valid distros as of May 2013:
2 | # centos
3 | # ubuntu
4 | # fedora
5 | # opensuse
6 | # rhel
7 | #
8 | # The distro version should be quoted to ensure proper operation, although it should work unquoted too.
9 |
10 | downburst:
11 | distro: opensuse
12 | distroversion: "12.2"
13 |
14 |
15 | #Full example of meta yaml file:
16 |
17 | downburst:
18 | ram: 1G
19 | disk-size: 3G
20 | cpus: 1
21 | distro: ubuntu
22 | distroversion: "12.10"
23 | networks:
24 | - source: front
25 | mac: 52:54:00:5a:aa:ee
26 |
--------------------------------------------------------------------------------
/downburst/exc.py:
--------------------------------------------------------------------------------
1 | class DownburstError(Exception):
2 | """
3 | Unknown Downburst error
4 | """
5 |
6 | def __str__(self):
7 | doc = self.__doc__.strip()
8 | return ': '.join([doc] + [str(a) for a in self.args])
9 |
10 |
11 | class LibvirtConnectionError(DownburstError):
12 | """
13 | Cannot connect to libvirt
14 | """
15 |
16 |
17 | class VMExistsError(DownburstError):
18 | """
19 | Virtual machine with this name exists already
20 | """
21 |
22 |
23 | class ImageHashMismatchError(DownburstError):
24 | """
25 | Image SHA-512 did not match
26 | """
27 |
28 | class HostNotProvisioned(DownburstError):
29 | """
30 | --wait was specified, but the virtual machine never came up
31 | """
32 |
--------------------------------------------------------------------------------
/downburst/util.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from lxml import etree
4 |
5 | log = logging.getLogger(__name__)
6 |
7 | def lookup_emulator(xml_text, arch):
8 | """
9 | Find emulator path for the arch in capabilities xml
10 | """
11 | arch_map = {
12 | 'amd64': 'x86_64',
13 | 'x86_64': 'x86_64',
14 | 'i686': 'i686',
15 | }
16 | _arch = arch_map.get(arch, None)
17 | assert _arch
18 | tree = etree.fromstring(xml_text)
19 | emulator_xpath = f'/capabilities/guest/arch[@name="{_arch}"]/emulator'
20 | log.debug(f'Looking for: {emulator_xpath}')
21 | emulators = tree.xpath(emulator_xpath)
22 | if emulators:
23 | return emulators[0].text
24 | else:
25 | return None
26 |
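27 | # Typical use (illustrative): pass the XML string returned by libvirt's
28 | # conn.getCapabilities() together with an arch string such as 'x86_64' or
29 | # 'amd64'; the result is an emulator path such as
30 | # '/usr/bin/qemu-system-x86_64', or None when no matching arch is listed.
31 |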
--------------------------------------------------------------------------------
/bootstrap:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | if command -v lsb_release >/dev/null 2>&1; then
5 | case "$(lsb_release --id --short)" in
6 | Ubuntu|Debian)
7 | for package in python3-venv python3-dev libxml2-dev libxslt1-dev python3-libvirt genisoimage pkg-config libvirt-dev gcc; do
8 | if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
9 | # add a space after old values
10 | missing="${missing:+$missing }$package"
11 | fi
12 | done
13 | if [ -n "$missing" ]; then
14 | echo "$0: missing required packages, please install them:" 1>&2
15 | echo " sudo apt-get install $missing"
16 | exit 1
17 | fi
18 | ;;
19 | esac
20 | fi
21 |
22 | test -d virtualenv || python3 -m venv virtualenv
23 | ./virtualenv/bin/pip install -r requirements-dev.txt
24 | ./virtualenv/bin/pip install -e .
25 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: ci
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | test:
11 | name: CI on python${{ matrix.python }} via ${{ matrix.os }}
12 | runs-on: ${{ matrix.os }}
13 | strategy:
14 | matrix:
15 | include:
16 | - os: ubuntu-22.04
17 | python: "3.10"
18 | - os: ubuntu-22.04
19 | python: "3.11"
20 | - os: ubuntu-24.04
21 | python: "3.12"
22 | steps:
23 | - uses: actions/checkout@v4
24 | - name: Setup Python
25 | uses: actions/setup-python@v5
26 | with:
27 | python-version: ${{ matrix.python }}
28 | - name: Install system deps
29 | run: sudo apt-get -y install libvirt-dev
30 | - name: Install tox
31 | run: pip install tox
32 | - name: Run unit tests
33 | run: tox -e py3
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2011 New Dream Network, LLC
2 | Copyright (c) 2014 Red Hat, Inc.
3 |
4 | Permission is hereby granted, free of charge, to any person obtaining a copy
5 | of this software and associated documentation files (the "Software"), to deal
6 | in the Software without restriction, including without limitation the rights
7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | copies of the Software, and to permit persons to whom the Software is
9 | furnished to do so, subject to the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be included in
12 | all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 | THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/downburst/test/test_discover.py:
--------------------------------------------------------------------------------
1 | from .. import discover
2 | from unittest.mock import patch, Mock
3 |
4 | @patch('requests.get')
5 | def test_ubuntu_handler(m_requests_get):
6 | h = discover.UbuntuHandler()
7 | assert 'focal' == h.get_release('20.04')
8 | assert '18.04' == h.get_version('bionic')
9 | m_request = Mock()
10 | m_request.content = (
11 | b"\n
\n\n")
18 | m_requests_get.return_value = m_request
19 | assert ('20250109','release') == h.get_latest_release_serial('focal')
20 |
21 | @patch('downburst.discover.UbuntuHandler.get_latest_release_serial')
22 | def test_get(m_get_latest_release_serial):
23 | m_get_latest_release_serial.return_value = ('20230420', 'release')
24 | checksum = 'cd824b19795e8a6b9ae993b0b5157de0275e952a7a9e8b9717ca07acec22f51b'
25 | res = discover.get('ubuntu', '20.04', 'x86_64')
26 | assert checksum == res['checksum']
27 |
28 |
--------------------------------------------------------------------------------
/downburst/template.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 524288
4 | 1
5 |
6 | hvm
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | destroy
16 | restart
17 | restart
18 |
19 | /usr/bin/kvm
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/downburst/dehumanize.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
4 | UNITS = dict(
5 | # nop, but nice to have explicitly
6 | B=1,
7 |
8 | # SI decimal prefixes
9 | kB=10**3,
10 | MB=10**6,
11 | GB=10**9,
12 | TB=10**12,
13 | PB=10**15,
14 | EB=10**18,
15 | ZB=10**21,
16 | YB=10**24,
17 |
18 | # IEC binary prefixes; kibibyte etc
19 | KiB=2**10,
20 | MiB=2**20,
21 | GiB=2**30,
22 | TiB=2**40,
23 | PiB=2**50,
24 | EiB=2**60,
25 | ZiB=2**70,
26 | YiB=2**80,
27 |
28 | # friendly aliases
29 | k=2**10,
30 | K=2**10,
31 | M=2**20,
32 | G=2**30,
33 | T=2**40,
34 | )
35 |
36 |
37 | UNIT_RE = re.compile(r'^\s*(?P[\d.]+)\s*(?P[a-zA-Z]+)?\s*$')
38 |
39 |
40 | class NotANumberAndUnit(Exception):
41 | """
42 | Input does not look like a number with unit
43 | """
44 |
45 | def __str__(self):
46 | doc = self.__doc__.strip()
47 | return ': '.join([doc] + [repr(a) for a in self.args])
48 |
49 |
50 | def parse(s):
51 | """
52 | Parse a human-friendly size into bytes.
53 | """
54 | if s is None:
55 | return s
56 |
57 | if isinstance(s, int):
58 | return s
59 |
60 | if isinstance(s, float):
61 | return int(round(s))
62 |
63 | match = UNIT_RE.match(s)
64 | if not match:
65 | raise NotANumberAndUnit(s)
66 |
67 | unit = match.group('unit')
68 | if unit is None:
69 | unit = 'B'
70 | try:
71 | multiplier = UNITS[unit]
72 | except KeyError:
73 | raise NotANumberAndUnit(s)
74 |
75 | num = match.group('num')
76 | try:
77 | num = int(num)
78 | except ValueError:
79 | num = float(num)
80 | num = num * multiplier
81 | num = int(round(num))
82 | return num
83 |
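84 | # Illustrative results (these follow from the UNITS table above and are
85 | # exercised in downburst/test/test_dehumanize.py):
86 | #   parse('42M')  -> 44040192   (42 * 2**20)
87 | #   parse('42kB') -> 42000      (42 * 10**3)
88 | #   parse('1.2M') -> 1258291    (round(1.2 * 2**20))
89 |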
--------------------------------------------------------------------------------
/downburst/wait.py:
--------------------------------------------------------------------------------
1 | import json
2 | import libvirt_qemu
3 | import time
4 | import logging
5 |
6 | from . import exc
7 |
8 | log = logging.getLogger(__name__)
9 |
10 |
11 | def is_cdrom_tray_open(domain):
12 | """
13 | Returns True if even one CD-ROM tray is open.
14 | """
15 | res = libvirt_qemu.qemuMonitorCommand(
16 | domain,
17 | json.dumps(
18 | {'execute': 'query-block'},
19 | ),
20 | # TODO should force this to be qmp, but python-libvirt 0.9.8
21 | # doesn't seem to be able to do that
22 | libvirt_qemu.VIR_DOMAIN_QEMU_MONITOR_COMMAND_DEFAULT,
23 | )
24 | res = json.loads(res)
25 | if 'error' in res:
26 | raise exc.DownburstError(
27 | 'Cannot query QEmu for block device state',
28 | res['error'].get('desc'),
29 | )
30 |
31 | cdroms = [dev for dev in res['return'] if 'tray_open' in dev]
32 |     if not cdroms:
33 |         # 'error' was already handled above, so don't reference res['error'] here
34 |         raise exc.DownburstError(
35 |             'VM must have at least one CD-ROM to check tray status',
36 |         )
37 |
38 | for dev in cdroms:
39 | if dev['tray_open']:
40 | return True
41 |
42 | return False
43 |
44 |
45 | def wait_for_cdrom_eject(domain):
46 | cd_ejected = False
47 | attempts = 0
48 | # wait five minutes for this
49 | while not cd_ejected and attempts < 50:
50 | cd_ejected = is_cdrom_tray_open(domain)
51 |
52 | if not cd_ejected:
53 | if attempts % 10 == 0:
54 | log.info('waiting for cd tray open')
55 | attempts += 1
56 | time.sleep(6)
57 |
58 | if not cd_ejected:
59 | raise exc.HostNotProvisioned(
60 | '%s never came up (cd tray open check failed)' % domain
61 | )
62 |
--------------------------------------------------------------------------------
/downburst/gen_ssh_key.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import os
3 | import shutil
4 | import subprocess
5 | import tempfile
6 |
7 | def read_and_delete(parent, name):
8 | path = os.path.join(parent, name)
9 | with open(path) as f:
10 | data = f.read()
11 | os.unlink(path)
12 | return data
13 |
14 |
15 | def gen_ssh_key(args):
16 | tmp = tempfile.mkdtemp(
17 | prefix='downburst-ssh-keys.',
18 | suffix='.tmp',
19 | )
20 | keys = {}
21 | try:
22 | for key_type in ['rsa', 'dsa']:
23 | subprocess.check_call(
24 | args=[
25 | 'ssh-keygen',
26 | '-q',
27 | '-t', key_type,
28 | '-N', '',
29 | '-f', 'key',
30 | ],
31 | cwd=tmp,
32 | close_fds=True,
33 | )
34 | keys.update(
35 | [
36 | ('{t}_private'.format(t=key_type),
37 | read_and_delete(tmp, 'key')),
38 | ('{t}_public'.format(t=key_type),
39 | read_and_delete(tmp, 'key.pub')),
40 | ]
41 | )
42 | os.rmdir(tmp)
43 | except:
44 | shutil.rmtree(tmp)
45 | raise
46 |
47 | # yaml.safe_dump formats this ugly as hell, so do it manually
48 | print('#cloud-config-archive')
49 | print('- type: text/cloud-config')
50 | print(' content: |')
51 | print(' ssh_keys:')
52 | for k, v in sorted(keys.items()):
53 | print(' {0}: |'.format(k))
54 | for l in v.splitlines():
55 | print(' {0}'.format(l))
56 |
57 |
58 | def make(parser):
59 | """
60 | Generate SSH host keys in a user-meta yaml format.
61 | """
62 | parser.set_defaults(func=gen_ssh_key)
63 |
--------------------------------------------------------------------------------
/downburst/test/test_dehumanize.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from .. import dehumanize
4 |
5 |
6 | def test_None():
7 | # let caller pass in None and get out None, to make handling
8 | # default cases simpler
9 | got = dehumanize.parse(None)
10 | assert got is None
11 |
12 |
13 | def test_int():
14 | # yaml can contain just "ram: 123456", and that comes through as
15 | # an int
16 | got = dehumanize.parse(42)
17 | assert got == 42
18 |
19 |
20 | def test_float():
21 | # yaml can contain just "ram: 123.456", that's silly because there
22 | # are no fractional bytes, but let's not crap out.
23 | got = dehumanize.parse(42.51)
24 | # rounded
25 | assert got == 43
26 |
27 |
28 | def test_simple():
29 | got = dehumanize.parse('42')
30 | assert got == 42
31 |
32 |
33 | def test_kibibyte():
34 | got = dehumanize.parse('42KiB')
35 | assert got == 42*1024
36 |
37 |
38 | def test_kibibyte_space():
39 | got = dehumanize.parse('42 KiB')
40 | assert got == 42*1024
41 |
42 |
43 | def test_kibibyte_space_many():
44 | got = dehumanize.parse(' 42 KiB ')
45 | assert got == 42*1024
46 |
47 |
48 | def test_megs():
49 | got = dehumanize.parse('42M')
50 | assert got == 42*1024*1024
51 |
52 |
53 | def test_kB():
54 | got = dehumanize.parse('42kB')
55 | assert got == 42000
56 |
57 |
58 | def test_float_M():
59 | got = dehumanize.parse('1.2M')
60 | assert got == int(round(1.2*1024*1024))
61 | assert got == 1258291
62 |
63 |
64 | def test_bad_no_number():
65 | with pytest.raises(dehumanize.NotANumberAndUnit) as exc:
66 | dehumanize.parse('foo')
67 | assert str(exc.value) == "Input does not look like a number with unit: 'foo'"
68 | assert exc.value.args == ('foo',)
69 |
70 |
71 | def test_bad_unit():
72 | with pytest.raises(dehumanize.NotANumberAndUnit) as exc:
73 | dehumanize.parse('42 kilolol')
74 | assert str(exc.value) == "Input does not look like a number with unit: '42 kilolol'"
75 | assert exc.value.args == ('42 kilolol',)
76 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | from setuptools import setup, find_packages
3 | import os
4 | import re
5 | import sys
6 |
7 | module_file = open("downburst/__init__.py").read()
8 | metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*['\"]([^'\"]*)['\"]", module_file))
9 | long_description = open('README.rst').read()
10 |
11 | install_requires=[
12 | 'setuptools',
13 | 'libvirt-python',
14 | 'distro',
15 | ]
16 |
17 | install_requires.extend(
18 |     [ln.strip() for ln in open('requirements-dev.txt').readlines() if ln.strip()]
19 | )
20 |
21 | pyversion = sys.version_info[:2]
22 | if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1):
23 | install_requires.append('argparse')
24 |
25 | tests_require = [
26 | 'pytest >= 2.1.3',
27 | 'tox >= 1.2'
28 | ]
29 |
30 | setup(
31 | name='downburst',
32 | version=metadata['version'],
33 | packages=find_packages(),
34 | author='Inktank Storage, Inc.',
35 | author_email='ceph-qa@ceph.com',
36 | description='Run Cloud images on libvirt virtual machines',
37 | long_description=long_description,
38 | license='MIT',
39 | keywords='libvirt virtualization',
40 | url="https://github.com/ceph/downburst",
41 | classifiers=[
42 | "Intended Audience :: Developers",
43 | "Intended Audience :: Information Technology",
44 | "License :: OSI Approved :: MIT License",
45 | "Natural Language :: English",
46 | "Operating System :: POSIX :: Linux",
47 | "Programming Language :: Python :: 3.10",
48 | "Programming Language :: Python :: 3.11",
49 | "Programming Language :: Python :: 3.12",
50 | ],
51 |
52 | install_requires=install_requires,
53 | tests_require=tests_require,
54 |
55 | entry_points={
56 |
57 | 'console_scripts': [
58 | 'downburst = downburst.cli:main',
59 | ],
60 |
61 | 'downburst.cli': [
62 | 'create = downburst.create:make',
63 | 'destroy = downburst.destroy:make',
64 | 'discover = downburst.discover:make_lookup',
65 | 'list = downburst.discover:make',
66 | 'gen-ssh-key = downburst.gen_ssh_key:make',
67 | 'list-json = downburst.discover:make_json',
68 | ],
69 |
70 | },
71 | )
72 |
--------------------------------------------------------------------------------
/downburst/meta.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os.path
3 | import yaml
4 | import requests
5 | log = logging.getLogger(__name__)
6 |
7 | def get_ssh_pubkey():
8 | for p in ('~/.ssh/id_rsa.pub', '~/.ssh/id_ed25519.pub'):
9 | path = os.path.expanduser(p)
10 | if os.path.exists(path):
11 | with open(path, 'rt') as f:
12 | return f.readline().rstrip('\n')
13 |     log.warning("Public key not found, skipping it: " + path)
14 |
15 |
16 | KEYURL='https://git.ceph.com/?p=keys.git;a=blob_plain;f=ssh/teuthology-ubuntu.pub;hb=HEAD'
17 |
18 | def keyfetch():
19 | print("Fetching default SSH key from " + KEYURL)
20 | r = requests.get(KEYURL)
21 | r.raise_for_status()
22 | gitkey = r.content.decode()
23 | if "ssh-" in gitkey:
24 | return gitkey
25 | else:
26 | raise Exception(KEYURL+" does not appear to contain an SSH key.")
27 |
28 | def gen_meta(
29 | name,
30 | extra_meta,
31 | nokey,
32 | ):
33 | meta_data = {
34 | 'instance-id': name,
35 | 'local-hostname': name,
36 | 'public-keys': [],
37 | }
38 | ssh_pubkey = get_ssh_pubkey()
39 | if ssh_pubkey is not None:
40 | meta_data['public-keys'].append(ssh_pubkey)
41 |
42 | if not nokey:
43 | ssh_gitkey = keyfetch()
44 | meta_data['public-keys'].append(ssh_gitkey)
45 |
46 | for path in extra_meta:
47 | with open(path) as f:
48 | extra_meta_data = yaml.safe_load(f)
49 | if extra_meta_data is not None:
50 | meta_data.update(extra_meta_data)
51 |
52 | return meta_data
53 |
54 |
55 | def write_meta(meta_data, fp):
56 | yaml.safe_dump(
57 | stream=fp,
58 | data=meta_data,
59 | default_flow_style=False,
60 | )
61 | fp.flush()
62 |
63 |
64 | def gen_user(
65 | name,
66 | extra_user,
67 | ):
68 | user_data = [
69 | ]
70 |
71 | for path in extra_user:
72 | with open(path) as f:
73 | if f.readline() == '#cloud-config-archive\n':
74 | # merge it into ours
75 | extra_user_data = yaml.safe_load(f)
76 | if extra_user_data is not None:
77 | user_data.extend(extra_user_data)
78 | else:
79 | # some other format; slap it in as a single string
80 | f.seek(0)
81 | extra_user_data = f.read()
82 | user_data.append(extra_user_data)
83 |
84 | return user_data
85 |
86 |
87 | def write_user(user_data, fp):
88 | fp.write('#cloud-config-archive\n')
89 | yaml.safe_dump(
90 | stream=fp,
91 | data=user_data,
92 | default_flow_style=False,
93 | )
94 | fp.flush()
95 |
--------------------------------------------------------------------------------
/downburst/test/test_template.py:
--------------------------------------------------------------------------------
1 | from .. import template
2 |
3 |
4 | def test_domain_name():
5 | tree = template.domain(
6 | name='fakename',
7 | disk_key='/fake/path',
8 | iso_key='/fake/iso',
9 | emulator='/fake/emulator/path',
10 | )
11 | name = tree.xpath('/domain/name/text()')
12 | name = ''.join(name)
13 | assert name == 'fakename'
14 |
15 |
16 | def test_domain_arch():
17 | tree = template.domain(
18 | name='fakename',
19 | disk_key='/fake/path',
20 | iso_key='/fake/iso',
21 | emulator='/fake/emulator/path',
22 | )
23 | arch = tree.xpath('/domain/os/type/@arch')
24 | assert arch == ['x86_64']
25 |
26 |
27 | def test_domain_disk():
28 | tree = template.domain(
29 | name='fakename',
30 | disk_key='/fake/path',
31 | iso_key='/fake/iso',
32 | emulator='/fake/emulator/path',
33 | )
34 | got = tree.xpath(
35 | '/domain/devices/disk[@device="disk"]/source/@file',
36 | )
37 | assert got == ['/fake/path']
38 |
39 |
40 | def test_domain_iso():
41 | tree = template.domain(
42 | name='fakename',
43 | disk_key='/fake/path',
44 | iso_key='/fake/iso',
45 | emulator='/fake/emulator/path',
46 | )
47 | got = tree.xpath(
48 | '/domain/devices/disk[@device="cdrom"]/source/@file',
49 | )
50 | assert got == ['/fake/iso']
51 |
52 |
53 | def test_domain_network_default():
54 | tree = template.domain(
55 | name='fakename',
56 | disk_key='/fake/path',
57 | iso_key='/fake/iso',
58 | emulator='/fake/emulator/path',
59 | )
60 | got = tree.xpath(
61 | '/domain/devices/interface[@type="network"]/source/@network',
62 | )
63 | assert got == ['default']
64 |
65 |
66 | def test_domain_network_custom():
67 | tree = template.domain(
68 | name='fakename',
69 | disk_key='/fake/path',
70 | iso_key='/fake/iso',
71 | networks=[
72 | dict(source='one'),
73 | dict(source='two'),
74 | ],
75 | emulator='/fake/emulator/path',
76 | )
77 | got = tree.xpath(
78 | '/domain/devices/interface[@type="network"]/source/@network',
79 | )
80 | assert got == ['one', 'two']
81 |
82 |
83 | def test_domain_network_mac():
84 | tree = template.domain(
85 | name='fakename',
86 | disk_key='/fake/path',
87 | iso_key='/fake/iso',
88 | networks=[
89 | dict(mac='12:34:56:78:90:ab'),
90 | ],
91 | emulator='/fake/emulator/path',
92 | )
93 | got = tree.xpath(
94 | '/domain/devices/interface[@type="network"]/mac/@address',
95 | )
96 | assert got == ['12:34:56:78:90:ab']
97 |
--------------------------------------------------------------------------------
/downburst/iso.py:
--------------------------------------------------------------------------------
1 | import distro
2 | import logging
3 | import os
4 | import subprocess
5 | import tempfile
6 |
7 | from lxml import etree
8 |
9 | from . import meta
10 | from . import template
11 |
12 | log = logging.getLogger(__name__)
13 |
14 | def generate_meta_iso(
15 | name,
16 | fp,
17 | meta_data,
18 | user_data,
19 | ):
20 | def gentemp(prefix):
21 | return tempfile.NamedTemporaryFile(
22 | prefix='downburst.{prefix}.'.format(prefix=prefix),
23 | suffix='.tmp',
24 | mode='wt'
25 | )
26 | with gentemp('meta') as meta_f, gentemp('user') as user_f:
27 | meta.write_meta(meta_data=meta_data, fp=meta_f)
28 | meta.write_user(user_data=user_data, fp=user_f)
29 |
30 | mkisofs = 'mkisofs'
31 | log.debug('The host distro id is %s', distro.id())
32 | if any(distro.id().startswith(_)
33 | for _ in ('debian', 'ubuntu')):
34 | mkisofs = 'genisoimage'
35 | log.debug('Using "%s" to create meta iso for "%s"', mkisofs, name)
36 | subprocess.check_call(
37 | args=[
38 | mkisofs,
39 | '-quiet',
40 | '-input-charset', 'utf-8',
41 | '-volid', 'cidata',
42 | '-joliet',
43 | '-rock',
44 | '-graft-points',
45 | 'user-data={path}'.format(path=user_f.name),
46 | 'meta-data={path}'.format(path=meta_f.name),
47 | ],
48 | stdout=fp,
49 | close_fds=True,
50 | )
51 |
52 |
53 | def upload_volume(vol, length, fp):
54 | # TODO share with image.upload_volume
55 | stream = vol.connect().newStream(flags=0)
56 | vol.upload(stream=stream, offset=0, length=length, flags=0)
57 |
58 | def handler(stream, nbytes, _):
59 | data = fp.read(nbytes)
60 | return data
61 | stream.sendAll(handler, None)
62 | stream.finish()
63 |
64 |
65 | def create_meta_iso(
66 | pool,
67 | name,
68 | meta_data,
69 | user_data,
70 | ):
71 | with tempfile.TemporaryFile() as iso:
72 | generate_meta_iso(
73 | name=name,
74 | fp=iso,
75 | meta_data=meta_data,
76 | user_data=user_data,
77 | )
78 | iso.seek(0)
79 | length = os.fstat(iso.fileno()).st_size
80 | assert length > 0
81 | volxml = template.volume(
82 | name='cloud-init.{name}.iso'.format(name=name),
83 | capacity=length,
84 | format_='raw',
85 | )
86 | vol = pool.createXML(etree.tostring(volxml).decode(), flags=0)
87 | upload_volume(
88 | vol=vol,
89 | length=length,
90 | fp=iso,
91 | )
92 | return vol
93 |
--------------------------------------------------------------------------------
/downburst/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import sys
4 |
5 | from importlib import metadata as meta
6 |
7 | from . import exc
8 |
9 |
10 | log = logging.getLogger(__name__)
11 |
12 |
13 | def parse_args():
14 | parser = argparse.ArgumentParser(
15 | description='Create an Ubuntu Cloud image vm',
16 | )
17 | parser.add_argument(
18 | '-v', '--verbose',
19 | action='store_true', default=None,
20 | help='be more verbose',
21 | )
22 | parser.add_argument(
23 | '-l', '--logfile',
24 | help='additional logfile (same as stderr log)',
25 | )
26 | parser.add_argument(
27 | '-c', '--connect',
28 | metavar='URI',
29 | help='libvirt URI to connect to',
30 | )
31 | sub = parser.add_subparsers(
32 | title='commands',
33 | metavar='COMMAND',
34 | help='description',
35 | )
36 |     m = meta.entry_points()
37 |     if type(m) is meta.EntryPoints:
38 |         # python >=3.12: entry_points() returns metadata.EntryPoints
39 |         eps = (ep for ep in m if ep.group == 'downburst.cli')
40 |     else:
41 |         # python <=3.11: entry_points() returns metadata.SelectableGroups,
42 |         # a dict subclass, so .get() works here
43 |         eps = (ep for ep in m.get('downburst.cli', []))
44 | for ep in eps:
45 | fn = ep.load()
46 | p = sub.add_parser(ep.name, help=fn.__doc__)
47 | # ugly kludge but i really want to have a nice way to access
48 | # the program name, with subcommand, later
49 | p.set_defaults(prog=p.prog)
50 | fn(p)
51 | parser.set_defaults(
52 | # we want to hold on to this, for later
53 | prog=parser.prog,
54 | connect='qemu:///system',
55 | )
56 | args = parser.parse_args()
57 | return args
58 |
59 |
60 | def main():
61 | args = parse_args()
62 |
63 | # turn off urllib3 connectionpool logging
64 | logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
65 | logging.WARN)
66 | logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN)
67 |
68 | loglevel = logging.INFO
69 | if args.verbose:
70 | loglevel = logging.DEBUG
71 |
72 | logging.basicConfig(
73 | level=loglevel,
74 | )
75 |
76 | if args.logfile:
77 | logger = logging.getLogger()
78 | formatter = logging.Formatter(
79 | fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
80 | datefmt='%Y-%m-%dT%H:%M:%S')
81 | handler = logging.FileHandler(filename=args.logfile)
82 | handler.setFormatter(formatter)
83 | logger.addHandler(handler)
84 |
85 | try:
86 | return args.func(args)
87 | except exc.DownburstError as e:
88 | log.error('{prog}: {msg}'.format(
89 | prog=args.prog,
90 | msg=e,
91 | ))
92 | sys.exit(1)
93 |
--------------------------------------------------------------------------------
/downburst/destroy.py:
--------------------------------------------------------------------------------
1 | import libvirt
2 | import logging
3 | import re
4 | import syslog
5 | import os
6 | from distro import id as distro_id
7 | from . import exc
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def looks_like_downburst_volume(name, vol_name):
13 | # {name}.img: the primary disk for the vm
14 | if vol_name == '{0}.img'.format(name):
15 | return True
16 |
17 | # additional disks for the vm
18 | if re.match(name + r'-(\d+).img', vol_name):
19 | return True
20 |
21 | # cloud-init.{name}.iso: cloud-init meta-data CD-ROM
22 | if (vol_name.startswith('cloud-init.{0}.'.format(name))
23 | and vol_name.endswith('.iso')):
24 | return True
25 |
26 | # {name}.*.img: secondary data disks added after creation
27 | if (vol_name.startswith('{0}.'.format(name))
28 | and vol_name.endswith('.img')):
29 | return True
30 |
31 | # RBD backed objects
32 | if vol_name == name:
33 | return True
34 |
35 | # additional RBD backed objects
36 | if re.match(name + r'-(\d+)', vol_name):
37 | return True
38 |
39 | return False
40 |
41 |
42 | def destroy(args):
43 | log.debug('Connecting to libvirt...')
44 | conn = libvirt.open(args.connect)
45 | if conn is None:
46 | raise exc.LibvirtConnectionError()
47 |
48 | try:
49 | dom = conn.lookupByName(args.name)
50 | except libvirt.libvirtError as e:
51 | if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
52 | # the vm does not exist, but the associated volumes might
53 | # still exist
54 | log.debug('No virtual machine found.')
55 | pass
56 | else:
57 | raise
58 | else:
59 | # losing unsynced data is fine here; we're going to remove the
60 | # disk images next
61 | log.debug('Terminating the virtual machine')
62 | if distro_id() == 'darwin':
63 | syslog_message = f'Destroyed guest: {args.name} on {args.connect}'
64 | else:
65 | env = os.environ
66 | try:
67 | pid = os.getpid()
68 |                 # os.getppid() won't return the correct value:
69 | ppid = open('/proc/{pid}/stat'.format(pid=pid)).read().split()[3]
70 | ppcmdline = open('/proc/{ppid}/cmdline'.format(ppid=ppid)).read().split('\x00')
71 |
72 | except (IndexError, IOError):
73 |                 log.exception('Something went wrong getting PPID/cmdline info')
74 |                 ppcmdline = 'ERROR_RETRIEVING'
75 |
76 | syslog_message = 'Destroyed guest: {name} on {host} by User: {username} PPCMD: {pcmd}'.format(
77 | name=args.name,
78 | host=args.connect,
79 | username=env.get('USER'),
80 | pcmd=ppcmdline)
81 | syslog.syslog(syslog.LOG_ERR, syslog_message)
82 |
83 | try:
84 | dom.destroy()
85 | except libvirt.libvirtError as e:
86 | if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID:
87 | # it wasn't running
88 | pass
89 | else:
90 | raise
91 |
92 | dom.undefineFlags(
93 | libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
94 | | libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA,
95 | )
96 |
97 | # we're going to remove all disks that look remotely like they
98 | # could be downburst vm related
99 |
100 | # TODO to make this safe, move the images to be prefixed with
101 | # e.g. "downburst."
102 |
103 |     log.debug('Getting libvirt pool list...')
104 | for poolentry in conn.listStoragePools():
105 | log.debug('Checking Pool: {pool}'.format(pool=poolentry))
106 | pool = conn.storagePoolLookupByName(poolentry)
107 |
108 | for vol_name in pool.listVolumes():
109 | log.debug('Checking Volume: {volume}'.format(volume=vol_name))
110 | if looks_like_downburst_volume(
111 | name=args.name,
112 | vol_name=vol_name,
113 | ):
114 | log.debug('Deleting volume: %r', vol_name)
115 | vol = pool.storageVolLookupByName(vol_name)
116 | vol.delete(flags=0)
117 | syslog_message = 'Deleted existing volume: {volume}'.format(volume=vol_name)
118 | syslog.syslog(syslog.LOG_ERR, syslog_message)
119 |
120 |
121 |
122 | def make(parser):
123 | """
124 | Destroy a vm and its data.
125 | """
126 | parser.add_argument(
127 | 'name',
128 | metavar='NAME',
129 | help='name of the vm to destroy',
130 | # TODO check valid syntax for hostname
131 | )
132 | parser.set_defaults(
133 | func=destroy,
134 | )
135 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ==========================================================
2 | Downburst -- fast Ubuntu Cloud image creation on libvirt
3 | ==========================================================
4 |
5 | Downburst is a tool for quickly creating virtual machines on
6 | libvirt. It uses Ubuntu's Cloud Images and qcow2 copy-on-write clones
7 | to make VM creation practically instantaneous, customizing them at
8 | boot time with cloud-init.
9 |
10 | For more information on Ubuntu Cloud Images, please refer to:
11 |
12 | - https://cloud.ubuntu.com/
13 | - https://help.ubuntu.com/community/UEC/Images
14 | - https://help.ubuntu.com/community/CloudInit
15 | - https://cloud-images.ubuntu.com/
16 |
17 |
18 | Installation
19 | ============
20 |
21 | You can install Downburst like any other Python package, but it also
22 | comes with a convenient bootstrap script that sets it up in a virtual
23 | environment under the source directory. Just run::
24 |
25 | git clone https://github.com/ceph/downburst.git
26 | cd downburst
27 | ./bootstrap
28 |
29 | And from there on, use::
30 |
31 | ./virtualenv/bin/downburst ARGS..
32 |
33 | You can also symlink that to e.g. ``~/bin/``.
34 |
35 |
36 | Usage
37 | =====
38 |
39 | You need to give a unique name to your vm. This will become the
40 | hostname of the vm, and the libvirt domain name. Run::
41 |
42 | downburst -c URI create NAME
43 |
44 | If this tool is not being run on a machine in the test cluster used by
45 | the Red Hat Ceph development and quality assurance teams, pass the
46 | ``--nokey`` option so that the default SSH key is not installed in the
47 | created guest's ``authorized_keys`` file::
48 |
49 |     downburst -c URI create NAME --nokey
50 |
51 | The URI can be an alias defined in ``uri_aliases`` in ``~/.config/libvirt/libvirt.conf``. Example::
52 |
53 | uri_aliases = [
54 | 'vercoi01=qemu+ssh://ubuntu@vercoi01.front.sepia.ceph.com/system?no_tty=1',
55 | 'vercoi02=qemu+ssh://ubuntu@vercoi02.front.sepia.ceph.com/system?no_tty=1',
56 | 'vercoi03=qemu+ssh://ubuntu@vercoi03.front.sepia.ceph.com/system?no_tty=1',
57 | 'vercoi04=qemu+ssh://ubuntu@vercoi04.front.sepia.ceph.com/system?no_tty=1',
58 | 'vercoi05=qemu+ssh://ubuntu@vercoi05.front.sepia.ceph.com/system?no_tty=1',
59 | 'vercoi06=qemu+ssh://ubuntu@vercoi06.front.sepia.ceph.com/system?no_tty=1',
60 | 'vercoi07=qemu+ssh://ubuntu@vercoi07.front.sepia.ceph.com/system?no_tty=1',
61 | 'vercoi08=qemu+ssh://ubuntu@vercoi08.front.sepia.ceph.com/system?no_tty=1',
62 | 'senta01=qemu+ssh://ubuntu@senta01.front.sepia.ceph.com/system?no_tty=1',
63 | 'senta02=qemu+ssh://ubuntu@senta02.front.sepia.ceph.com/system?no_tty=1',
64 | 'senta03=qemu+ssh://ubuntu@senta03.front.sepia.ceph.com/system?no_tty=1',
65 | 'senta04=qemu+ssh://ubuntu@senta04.front.sepia.ceph.com/system?no_tty=1',
66 | ]
67 |
68 |
69 | You can delete a guest with (use caution)::
70 |
71 | downburst -c URI destroy NAME
72 |
73 | By default, your local SSH public key (grabbed from
74 | ``~/.ssh/id_rsa.pub``) is authorized to log in as ``ubuntu``.
75 |
76 | You can also pass in EC2-style ``meta-data`` and ``user-data``
77 | snippets; if you repeat the argument, the files will be merged::
78 |
79 | downburst create --meta-data=FILE.meta.yaml \
80 | --user-data=FILE.user.yaml NAME
81 |
82 | See ``doc/examples/`` for ideas on meta-data and user-data usage, and
83 | explore the Ubuntu links above.
84 |
85 | Valid Downburst options in meta yaml with their defaults if undefined:
86 |
87 | disk-size: (disk space)
88 | Default 10G. Example: 20G
89 | additional-disks: (number of additional drives for the guest)
90 | Default 0. Example: 5 (for 5 additional drives)
91 | additional-disks-size: (Size of the additional disks)
92 | Default 10G. Example: 100G
93 | ram: (ram amount)
94 | Default 512M. Example: 2G
95 | cpus: (cpu/core count)
96 |     Default 1. Example: 4
97 | networks: (what nics/networks/mac addresses)::
98 | Default Nat. Example:
99 | - source: front
100 | mac: 52:54:00:5a:aa:ee
101 | distro: (distro type)
102 | Default ubuntu. Example centos
103 | distroversion: (distro version)
104 | Default (if ubuntu) "12.04". Example "12.10"
105 | arch: (Image architecture)
106 |     Default "amd64". Can be "i386" or "amd64"/"x86_64"
107 |
108 |
109 | Distro, distroversion, and arch can also be set at creation time on the command line with ``--distro=VALUE``, ``--distroversion=VALUE``, or ``--arch=VALUE``.
110 |
111 | All available distributions/versions can be listed by running::
112 |
113 | downburst list
114 |
115 | Static SSH key generation
116 | =========================
117 |
118 | Downburst also includes a utility to create static SSH keys, for when
119 | you want to delete and recreate the vm repeatedly, but not have SSH
120 | complain all the time.
121 |
122 | To set it up, run this once::
123 |
124 | downburst gen-ssh-key >NAME.user.yaml
125 |
126 | And from there on, recreate the vm (after deleting it) with::
127 |
128 | downburst create --user-data=NAME.user.yaml NAME
129 |
130 | Adding new images
131 | =================
132 |
133 | Ubuntu images are fetched from Canonical themselves, so they shouldn't need any special attention.
134 |
135 | Here is the process we recently followed (on ``download.ceph.com``) for adding CentOS 7.2::
136 |
137 | curl -O http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1511.qcow2.xz
138 | xzcat CentOS-7-x86_64-GenericCloud-1511.qcow2.xz > repos/cloudinit/centos-7.2-20151214-cloudimg-amd64.img
139 | sha512sum repos/cloudinit/centos-7.2-20151214-cloudimg-amd64.img | cut -d' ' -f1 > repos/cloudinit/centos-7.2-20151214-cloudimg-amd64.img.sha512
140 |
141 | No modification to the image - beyond decompression and renaming - was necessary.
142 |
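143 | Once the image and its ``.sha512`` checksum are in place, a guest can be
144 | created from it in the usual way; an illustrative invocation matching the
145 | example above::
146 |
147 |     downburst -c URI create --distro=centos --distroversion=7.2 NAME
148 |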
--------------------------------------------------------------------------------
/downburst/image.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import logging
3 | import requests
4 |
5 | from lxml import etree
6 |
7 | from . import discover
8 | from . import exc
9 | from . import template
10 |
11 | log = logging.getLogger(__name__)
12 |
13 | def remove_image_if_corrupt(pool, name):
14 | """
15 | Check for Partial download (bad requests or control-c'd)
16 | If file-size is unreasonable (<50 megabytes) remove the
17 | template file.
18 | """
19 | # 50 megabytes
20 | minimum = 50 * 1048576
21 | vol = pool.storageVolLookupByName(name)
22 | size = vol.info()[1]
23 | if size < minimum:
24 | log.info('Deleting Corrupt Volume: {name} as size is < {minimum}MiB ({size}MiB)'.format(
25 | name=name,
26 | minimum=minimum/1048576,
27 | size=size/1048576))
28 | vol.delete(flags=0)
29 | return False
30 | return True
31 |
32 | def list_cloud_images(pool, distro, distroversion, arch):
33 | """
34 | List all Cloud images in the libvirt pool.
35 | Return the keys.
36 | """
37 |
38 | #Fix distro version if someone did not use quotes
39 | if distro == "ubuntu":
40 | if isinstance(distroversion, float):
41 | distroversion = '%.2f' % distroversion
42 |
43 | PREFIX = distro+"-"+distroversion+"-"
44 | SUFFIX = '-cloudimg-'+arch+'.img'
45 | SUFFIXRAW = '-cloudimg-'+arch+'.raw'
46 |
47 | for name in pool.listVolumes():
48 | if not 'cloudimg' in name:
49 | continue
50 | log.debug('Considering image: %s', name)
51 | if not name.startswith(PREFIX):
52 | continue
53 | if not (name.endswith(SUFFIX) or name.endswith(SUFFIXRAW)):
54 | continue
55 | if len(name) <= len(PREFIX) + len(SUFFIX):
56 | # no serial number in the middle
57 | continue
58 | # found one!
59 | if not remove_image_if_corrupt(pool, name):
60 | # delete if corrupt!
61 | continue
62 | log.debug('Saw image: %s', name)
63 | yield name
64 |
65 |
66 | def find_cloud_image(pool, distro, distroversion, arch):
67 | """
68 | Find a Cloud image in the libvirt pool.
69 | Return the name.
70 | """
71 | names = list_cloud_images(pool, distro=distro, distroversion=distroversion, arch=arch)
72 | # converting into a list because max([]) raises ValueError, and we
73 | # really don't want to confuse that with exceptions from inside
74 | # the generator
75 | names = list(names)
76 |
77 | if not names:
78 | log.debug('No cloud images found.')
79 | return None
80 |
81 | log.debug(names)
82 | # the build serial is zero-padded, hence alphabetically sortable;
83 | # max is the latest image
84 | latest=max(names)
85 | log.debug(f"Found latest image {latest}")
86 | return latest
87 |
88 |
89 | def upload_volume(vol, fp, hash_function, checksum):
90 | """
91 | Upload a volume into a libvirt pool.
92 | """
93 |
94 | h = hashlib.new(hash_function)
95 | stream = vol.connect().newStream(flags=0)
96 | vol.upload(stream=stream, offset=0, length=0, flags=0)
97 |
98 | def handler(stream, nbytes, _):
99 | data = fp.read(nbytes)
100 | h.update(data)
101 | return data
102 | stream.sendAll(handler, None)
103 |
104 | if h.hexdigest() != checksum:
105 | stream.abort()
106 | vol.delete(flags=0)
107 | raise exc.ImageHashMismatchError()
108 | stream.finish()
109 |
110 |
111 | def ensure_cloud_image(pool, distro, distroversion, arch, forcenew=False):
112 | """
113 | Ensure that the Ubuntu Cloud image is in the libvirt pool.
114 | Returns the volume.
115 | """
116 |
117 | log.debug('Listing cloud image in libvirt...')
118 | name = find_cloud_image(pool=pool, distro=distro, distroversion=distroversion, arch=arch)
119 | raw = False
120 | if not forcenew:
121 | if name is not None:
122 | # all done
123 | if name.endswith('.raw'):
124 | raw = True
125 | log.debug('Already have cloud image: %s', name)
126 | vol = pool.storageVolLookupByName(name)
127 | return vol, raw
128 |
129 | log.debug('Discovering cloud images...')
130 | image = discover.get(distro=distro, distroversion=distroversion, arch=arch)
131 | log.debug('Will fetch serial number: %s', image['serial'])
132 |
133 | url = image['url']
134 | if url.endswith('.raw'):
135 | raw = True
136 | log.info('Downloading image: %s', url)
137 |
138 | # prefetch used to default to False; 0.13.6 changed that to True, and
139 | # 1.0.0 changed it to 'stream' with the opposite sense. We really
140 | # want streaming behavior no matter which version of requests; try
141 | # to cope with any version.
142 | if tuple(map(int, requests.__version__.split('.'))) < (1,0,0):
143 | r = requests.get(url, prefetch=False)
144 | else:
145 | r = requests.get(url, stream=True)
146 |
147 | # volumes have no atomic completion marker; this will forever be
148 | # racy!
149 | ext = '.img'
150 | if raw:
151 | ext = '.raw'
152 | PREFIX = distro+"-"+distroversion+"-"
153 | SUFFIX = '-cloudimg-'+arch+ext
154 |
155 | name = '{prefix}{serial}{suffix}'.format(
156 | prefix=PREFIX,
157 | serial=image['serial'],
158 | suffix=SUFFIX,
159 | )
160 | log.debug('Creating libvirt volume: %s ...', name)
161 | volxml = template.volume(
162 | name=name,
163 | raw=raw,
164 | # TODO we really should feed in a capacity, but we don't know
165 | # what it should be.. libvirt pool refresh figures it out, but
166 | # that's probably expensive
167 | # capacity=2*1024*1024,
168 | )
169 | vol = pool.createXML(etree.tostring(volxml).decode(), flags=0)
170 | upload_volume(
171 | vol=vol,
172 | fp=r.raw,
173 | hash_function=image['hash_function'],
174 | checksum=image['checksum'],
175 | )
176 | # TODO only here to autodetect capacity
177 | pool.refresh(flags=0)
178 | return vol, raw
179 |
180 |
--------------------------------------------------------------------------------
/downburst/template.py:
--------------------------------------------------------------------------------
1 | import distro
2 | import logging
3 |
4 | from lxml import etree
5 | import importlib.resources
6 |
7 | log = logging.getLogger(__name__)
8 |
9 | emulator_path = None
10 |
11 | def parse_rbd_monitor(monitorlist):
12 | monitors = dict()
13 | for monitor in monitorlist.split(','):
14 | port = '6789'
15 | if ':' in monitor:
16 | port = monitor.split(':')[1]
17 | monitor = monitor.split(':')[0]
18 | monitors[monitor] = port
19 | return monitors
20 |
21 | def rbd_pool(
22 | name,
23 | pool,
24 | monitorlist,
25 | user,
26 | secret
27 | ):
28 |
29 | root = etree.Element('pool', type='rbd')
30 | etree.SubElement(root, 'name').text = name
31 | rsource = etree.SubElement(root, 'source')
32 | etree.SubElement(rsource,'name').text = pool
33 |
34 |     for monitor, port in parse_rbd_monitor(monitorlist).items():
35 | etree.SubElement(rsource, 'host', name=monitor, port=port)
36 |
37 | if user:
38 | auth = etree.SubElement(rsource, 'auth', username=user, type='ceph')
39 | etree.SubElement(auth, 'secret', uuid=secret)
40 | return root
41 |
42 | def rbd_volume(
43 | name,
44 | capacity,
45 | pool,
46 | ):
47 | root = etree.Element('volume')
48 | etree.SubElement(root, 'name').text = name
49 | etree.SubElement(root, 'source')
50 | etree.SubElement(root, 'capacity', unit='bytes').text = str(capacity)
51 | etree.SubElement(root, 'allocation', unit='bytes').text = str(capacity)
52 | target = etree.SubElement(root, 'target')
53 | etree.SubElement(target, 'path').text = 'rbd:{pool}/{name}'.format(pool=pool, name=name)
54 | etree.SubElement(target, 'format', type='unknown')
55 | permissions = etree.SubElement(target, 'permissions')
56 | etree.SubElement(permissions, 'mode').text = '00'
57 | etree.SubElement(permissions, 'owner').text = '0'
58 | etree.SubElement(permissions, 'group').text = '0'
59 | return root
60 |
61 | def volume(
62 | name,
63 | capacity=0,
64 | format_=None,
65 | sparse=True,
66 | raw = False,
67 | ):
68 | root = etree.Element('volume')
69 | etree.SubElement(root, 'name').text = name
70 | etree.SubElement(root, 'capacity').text = '{0:d}'.format(capacity)
71 | if sparse:
72 | etree.SubElement(root, 'allocation').text = '0'
73 |     if raw:
74 |         format_ = 'raw'
75 | target = etree.SubElement(root, 'target')
76 | if format_ is None:
77 | format_ = 'qcow2'
78 | etree.SubElement(target, 'format', type=format_)
79 | return root
80 |
81 |
82 | def volume_clone(
83 | name,
84 | parent_vol,
85 | capacity=None,
86 | raw = False
87 | ):
88 | (_type_, parent_capacity, _allocation) = parent_vol.info()
89 | if capacity is None:
90 | capacity = parent_capacity
91 | type = 'qcow2'
92 | sparse = False
93 | if raw:
94 | type = 'raw'
95 | sparse = False
96 | root = volume(name=name, capacity=capacity, sparse=sparse, raw=raw)
97 | backing = etree.SubElement(root, 'backingStore')
98 | etree.SubElement(backing, 'format', type=type)
99 | etree.SubElement(backing, 'path').text = parent_vol.key()
100 | return root
101 |
102 | def get_emulator_path():
103 | global emulator_path
104 | if emulator_path:
105 | return emulator_path
106 | log.debug('The host distro id is %s', distro.id())
107 | if any(distro.id().startswith(_)
108 | for _ in ('opensuse', 'sles')):
109 | path = '/usr/bin/qemu-kvm'
110 | elif any(distro.id().startswith(_)
111 | for _ in ('centos', 'fedora', 'rhel')):
112 | path = '/usr/libexec/qemu-kvm'
113 | elif any(distro.id().startswith(_)
114 | for _ in ('ubuntu', 'debian')):
115 | path = '/usr/bin/kvm'
116 | else:
117 | raise Exception("Can't get emulator path, the distro '%s' "
118 | "is not supported yet" % distro.id())
119 | log.debug('Using emulator path: "%s"', path)
120 | emulator_path = path
121 | return emulator_path
122 |
123 | def domain(
124 | name,
125 | disk_key,
126 | iso_key,
127 | ram=None,
128 | cpus=None,
129 | networks=None,
130 | additional_disks_key=None,
131 | rbd_disks_key=None,
132 | rbd_details=None,
133 | hypervisor='kvm',
134 | raw = False,
135 | emulator = None,
136 | ):
137 | with importlib.resources.files('downburst').joinpath('template.xml').open() as f:
138 | tree = etree.parse(f)
139 | (domain,) = tree.xpath('/domain')
140 | domain.set('type', hypervisor)
141 |
142 | n = etree.SubElement(domain, 'name')
143 | n.text = name
144 |
145 | #
146 | #
147 | #
148 | #
149 | #
150 | type = 'qcow2'
151 | if raw:
152 | type = 'raw'
153 | (devices,) = tree.xpath('/domain/devices')
154 | emulator_element = devices.find('emulator')
155 | emulator_path = emulator or get_emulator_path()
156 | if emulator_element is not None:
157 | log.debug('Overriding xpath /domain/devices/emulator in xml template with: %s'
158 | % emulator_path)
159 | emulator_element.text = emulator_path
160 | else:
161 | etree.SubElement(devices, 'emulator').text = emulator_path
162 | disk = etree.SubElement(devices, 'disk', type='file', device='disk')
163 | etree.SubElement(disk, 'driver', name='qemu', type=type)
164 | etree.SubElement(disk, 'source', file=disk_key)
165 | etree.SubElement(disk, 'target', dev='vda', bus='virtio')
166 | letters = 'abcdefghijklmnopqrstuvwxyz'
167 | x = 0
168 | if additional_disks_key is not None:
169 | for key in additional_disks_key:
170 | x += 1
171 |
172 | # Skip a because vda = boot drive. Drives should start
173 | # at vdb and continue: vdc, vdd, etc...
174 |
175 | blockdevice = 'vd' + letters[x]
176 |
177 | #
178 | #
179 | #
180 | #
181 | #
182 | (devices,) = tree.xpath('/domain/devices')
183 | disk = etree.SubElement(devices, 'disk', type='file', device='disk')
184 | etree.SubElement(disk, 'driver', name='qemu', type='raw')
185 | etree.SubElement(disk, 'source', file=key)
186 | etree.SubElement(disk, 'target', dev=blockdevice, bus='virtio')
187 | if rbd_disks_key is not None:
188 | for key in rbd_disks_key:
189 | x += 1
190 |
191 | # Skip a because vda = boot drive. Drives should start
192 | # at vdb and continue: vdc, vdd, etc...
193 |
194 | blockdevice = 'vd' + letters[x]
195 |
196 | #
197 | #
198 | #
199 | #
200 | #
201 |
202 | (devices,) = tree.xpath('/domain/devices')
203 | disk = etree.SubElement(devices, 'disk', type='network')
204 | etree.SubElement(disk, 'driver', name='qemu', type='raw')
205 | rsource = etree.SubElement(disk, 'source', protocol='rbd', name=key)
206 |             for monitor, port in parse_rbd_monitor(rbd_details['ceph_cluster_monitors']).items():
207 | etree.SubElement(rsource, 'host', name=monitor, port=port)
208 |
209 | etree.SubElement(disk, 'target', dev=blockdevice, bus='virtio')
210 | if rbd_details['ceph_cluster_user']:
211 | auth = etree.SubElement(disk, 'auth', username=rbd_details['ceph_cluster_user'])
212 | etree.SubElement(auth, 'secret', type='ceph', usage=rbd_details['ceph_cluster_secret'])
213 |
214 | #
215 | #
216 | #
217 | #
218 | #
219 | #
220 | disk = etree.SubElement(devices, 'disk', type='file', device='cdrom')
221 | etree.SubElement(disk, 'driver', name='qemu', type='raw')
222 | etree.SubElement(disk, 'source', file=iso_key)
223 | etree.SubElement(disk, 'target', dev='hdc', bus='ide')
224 |
225 | if ram is not None:
226 | # default unit is kibibytes, and libvirt <0.9.11 doesn't
227 | # support changing that
228 | ram = int(round(ram/1024.0))
229 | (memory,) = tree.xpath('/domain/memory')
230 | memory.text = '{ram:d}'.format(ram=ram)
231 |
232 | if cpus is not None:
233 | (vcpu,) = tree.xpath('/domain/vcpu')
234 | vcpu.text = '{cpus:d}'.format(cpus=cpus)
235 |
236 | #
237 | #
238 | #
239 | #
240 | if networks is None:
241 | networks = [{}]
242 | for net in networks:
243 | net_elem = etree.SubElement(
244 | devices,
245 | 'interface',
246 | type='network',
247 | )
248 | etree.SubElement(net_elem, 'model', type='virtio')
249 | etree.SubElement(
250 | net_elem,
251 | 'source',
252 | network=net.get('source', 'default'),
253 | )
254 | mac = net.get('mac')
255 | if mac is not None:
256 |             # honor an explicitly requested MAC address
257 | etree.SubElement(net_elem, 'mac', address=mac)
258 |
259 | return tree
260 |
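261 | # Usage sketch (comments only): create.py builds the domain roughly like
262 | # this, where ``clone`` and ``iso_vol`` are libvirt storage volumes and
263 | # ``dehumanize.parse`` turns a human-readable size into bytes. The values
264 | # shown are placeholders, not project defaults.
265 | #
266 | #   tree = domain(
267 | #       name='vm-0',
268 | #       disk_key=clone.key(),
269 | #       iso_key=iso_vol.key(),
270 | #       ram=dehumanize.parse('2G'),
271 | #       cpus=2,
272 | #       networks=None,
273 | #       additional_disks_key=None,
274 | #       rbd_disks_key=None,
275 | #       rbd_details={},
276 | #   )
277 | #   conn.defineXML(etree.tostring(tree).decode())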
--------------------------------------------------------------------------------
/downburst/test/test_util.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from .. import util
4 |
5 | # A trimmed libvirt getCapabilities() document: enough structure for
6 | # util.lookup_emulator() to map a guest architecture to its emulator
7 | # path (machine-type lists and most host details are abridged).
8 | capabilities_xml_text = """
9 | <capabilities>
10 | 
11 |   <host>
12 |     <uuid>f39760cc-2adc-11b2-a85c-c6cfbb1a38ee</uuid>
13 |     <cpu>
14 |       <arch>x86_64</arch>
15 |       <model>Skylake-Client-IBRS</model>
16 |       <vendor>Intel</vendor>
17 |     </cpu>
18 |     <migration_features>
19 |       <live/>
20 |       <uri_transports>
21 |         <uri_transport>tcp</uri_transport>
22 |         <uri_transport>rdma</uri_transport>
23 |       </uri_transports>
24 |     </migration_features>
25 |     <topology>
26 |       <cells num='1'>
27 |         <cell id='0'>
28 |           <memory unit='KiB'>65587660</memory>
29 |           <pages unit='KiB' size='4'>16396915</pages>
30 |           <pages unit='KiB' size='2048'>0</pages>
31 |           <pages unit='KiB' size='1048576'>0</pages>
32 |         </cell>
33 |       </cells>
34 |     </topology>
35 |     <secmodel>
36 |       <model>apparmor</model>
37 |       <doi>0</doi>
38 |     </secmodel>
39 |     <secmodel>
40 |       <model>dac</model>
41 |       <doi>0</doi>
42 |       <baselabel type='kvm'>+455:+456</baselabel>
43 |       <baselabel type='qemu'>+455:+456</baselabel>
44 |     </secmodel>
45 |   </host>
46 | 
47 |   <guest>
48 |     <os_type>hvm</os_type>
49 |     <arch name='i686'>
50 |       <wordsize>32</wordsize>
51 |       <emulator>/usr/bin/qemu-system-i386</emulator>
52 |       <machine>pc-i440fx-5.2</machine>
53 |       <machine canonical='pc-i440fx-5.2'>pc</machine>
54 |       <machine>pc-q35-5.2</machine>
55 |       <machine canonical='pc-q35-5.2'>q35</machine>
56 |       <machine>isapc</machine>
57 |       <machine>microvm</machine>
58 |       <!-- further machine types omitted -->
59 |       <domain type='qemu'/>
60 |       <domain type='kvm'/>
61 |     </arch>
62 |   </guest>
63 | 
64 |   <guest>
65 |     <os_type>hvm</os_type>
66 |     <arch name='x86_64'>
67 |       <wordsize>64</wordsize>
68 |       <emulator>/usr/bin/qemu-system-x86_64</emulator>
69 |       <machine>pc-i440fx-5.2</machine>
70 |       <machine canonical='pc-i440fx-5.2'>pc</machine>
71 |       <machine>pc-q35-5.2</machine>
72 |       <machine canonical='pc-q35-5.2'>q35</machine>
73 |       <machine>isapc</machine>
74 |       <machine>microvm</machine>
75 |       <!-- further machine types omitted -->
76 |       <domain type='qemu'/>
77 |       <domain type='kvm'/>
78 |     </arch>
79 |   </guest>
80 | 
81 | </capabilities>
82 | """
83 | 
84 | @pytest.mark.parametrize(
85 |     'arch,res',
86 |     [
87 |         ('x86_64', '/usr/bin/qemu-system-x86_64'),
88 |         ('amd64', '/usr/bin/qemu-system-x86_64'),
89 |         ('i686', '/usr/bin/qemu-system-i386'),
90 |     ]
91 | )
92 | def test_lookup_emulator(arch, res):
93 |     path = util.lookup_emulator(capabilities_xml_text, arch)
94 |     assert path == res
95 | 
--------------------------------------------------------------------------------
/downburst/create.py:
--------------------------------------------------------------------------------
1 | import libvirt
2 | import logging
3 | import syslog
4 | import os
5 |
6 | from distro import id as distro_id
7 | from lxml import etree
8 |
9 | from . import dehumanize
10 | from . import image
11 | from . import iso
12 | from . import exc
13 | from . import meta
14 | from . import template
15 | from . import wait
16 | from . import discover
17 | from . import util
18 |
19 | log = logging.getLogger(__name__)
20 |
21 |
22 | def lookup_emulator_path(conn, arch):
23 | text = conn.getCapabilities()
24 | return util.lookup_emulator(text, arch)
25 |
26 | def create(args):
27 | log.debug('Connecting to libvirt...')
28 | conn = libvirt.open(args.connect)
29 | if conn is None:
30 | raise exc.LibvirtConnectionError()
31 |
32 | meta_data = meta.gen_meta(
33 | name=args.name,
34 | extra_meta=args.meta_data,
35 | nokey=args.nokey,
36 | )
37 |
38 | user_data = meta.gen_user(
39 | name=args.name,
40 | extra_user=args.user_data,
41 | )
42 |
43 | if args.distro:
44 | distro = args.distro
45 | else:
46 | distro = meta_data.get('downburst', {}).get('distro')
47 |
48 | if distro is None:
49 | distro = "ubuntu"
50 |
51 | if args.distroversion:
52 | distroversion = args.distroversion
53 | else:
54 | distroversion = meta_data.get('downburst', {}).get('distroversion')
55 |
56 |     # If the ubuntu distroversion is a codename (e.g. quantal) rather than a version, convert it:
57 | if distroversion:
58 | if distro == 'ubuntu' and ('.' not in distroversion):
59 | handler = discover.UbuntuHandler()
60 | distroversion = handler.get_version(distroversion)
61 |
62 | if distroversion is None:
63 | defaultversion = dict(
64 | ubuntu="12.04",
65 | fedora="17",
66 | centos="6.3",
67 | opensuse="15.1",
68 | sles="11-sp2",
69 | rhel="6.3",
70 | rocky="9.5",
71 | debian='6.0'
72 | )
73 | distroversion = defaultversion[distro]
74 |
75 | if args.arch:
76 | arch = args.arch
77 | else:
78 | arch = meta_data.get('downburst', {}).get('arch')
79 |
80 | if arch == "x86_64":
81 | arch = "amd64"
82 |
83 |     if arch == "aarch64":
84 |         arch = "arm64"
85 |
86 | if arch is None:
87 | arch = "amd64"
88 |
89 | emulator_path = lookup_emulator_path(conn, arch)
90 | if emulator_path:
91 | log.debug(f'Determined emulator path: {emulator_path}')
92 | # check if the vm exists already, complain if so. this would
93 | # normally use conn.lookupByName, but that logs on all errors;
94 | # avoid the noise.
95 | if args.name in conn.listDefinedDomains():
96 | raise exc.VMExistsError(args.name)
97 |
98 | log.debug('Opening libvirt pool...')
99 |
100 | # Check if pool with same name of guest exists, use it if it does
101 | pool = ''
102 | pools = conn.listStoragePools()
103 | for poolentry in pools:
104 | if poolentry == args.name:
105 | pool = conn.storagePoolLookupByName(poolentry)
106 | break
107 | if not pool:
108 | pool = conn.storagePoolLookupByName('default')
109 |
110 | vol, raw = image.ensure_cloud_image(pool=pool, distro=distro, distroversion=distroversion, arch=arch, forcenew=args.forcenew)
111 |
112 | if args.wait:
113 | user_data.append("""\
114 | #!/bin/sh
115 | # eject the cdrom (containing the cloud-init metadata)
116 | # as a signal that we've reached full functionality;
117 | # this is used by ``downburst create --wait``
118 | exec eject /dev/cdrom
119 | """)
120 |
121 | capacity = meta_data.get('downburst', {}).get('disk-size', '10G')
122 | capacity = dehumanize.parse(capacity)
123 | additional_disks = meta_data.get('downburst', {}).get('additional-disks')
124 | additional_disks_size = meta_data.get('downburst', {}).get('additional-disks-size', '10G')
125 | additional_disks_size = dehumanize.parse(additional_disks_size)
126 | ceph_cluster_name = meta_data.get('downburst', {}).get('ceph-cluster-name')
127 | ceph_cluster_monitors = meta_data.get('downburst', {}).get('ceph-cluster-monitors')
128 | ceph_cluster_pool = meta_data.get('downburst', {}).get('ceph-cluster-pool')
129 | ceph_cluster_user = meta_data.get('downburst', {}).get('ceph-cluster-user')
130 | ceph_cluster_secret = meta_data.get('downburst', {}).get('ceph-cluster-secret')
131 | rbd_disks = meta_data.get('downburst', {}).get('rbd-disks')
132 | rbd_disks_size = dehumanize.parse(meta_data.get('downburst', {}).get('rbd-disks-size'))
133 | rbd_details = dict()
134 | rbd_details['ceph_cluster_name'] = ceph_cluster_name
135 | rbd_details['ceph_cluster_monitors'] = ceph_cluster_monitors
136 | rbd_details['ceph_cluster_pool'] = ceph_cluster_pool
137 | rbd_details['ceph_cluster_user'] = ceph_cluster_user
138 | rbd_details['ceph_cluster_secret'] = ceph_cluster_secret
139 |
140 |
141 | clonexml = template.volume_clone(
142 | name='{name}.img'.format(name=args.name),
143 | parent_vol=vol,
144 | capacity=capacity,
145 | raw=raw,
146 | )
147 | clone = pool.createXML(etree.tostring(clonexml).decode(), flags=0)
148 |
149 | iso_vol = iso.create_meta_iso(
150 | pool=pool,
151 | name=args.name,
152 | meta_data=meta_data,
153 | user_data=user_data,
154 | )
155 |
156 | # We want the range to be 2 - X depending on disk count.
157 | # Since there is already a boot volume we want the image
158 | # names to be appended with -2, -3, -4, etc... for the
159 | # additional disks.
160 | additional_disks_key = []
161 | if additional_disks is not None:
162 | for disknum in range(1, additional_disks + 1):
163 | disknum += 1
164 | diskname = args.name + '-' + str(disknum) + '.img'
165 | diskxml = template.volume(
166 | name=diskname,
167 | capacity=additional_disks_size,
168 | format_='raw',
169 | sparse=False,
170 | )
171 | additional_disks_key.append(pool.createXML(etree.tostring(diskxml).decode(), flags=0).key())
172 | if not additional_disks_key:
173 | additional_disks_key = None
174 |
175 | rbd_disks_key = []
176 | if rbd_disks:
177 |         assert ceph_cluster_name is not None, "Unable to set up RBD storage pool: ceph-cluster-name is required but is: %s" % ceph_cluster_name
178 | try:
179 | rbdpool = conn.storagePoolLookupByName(ceph_cluster_name)
180 | except libvirt.libvirtError:
181 | poolxml = template.rbd_pool(
182 | name=ceph_cluster_name,
183 | monitorlist=ceph_cluster_monitors,
184 | pool=ceph_cluster_pool,
185 | user=ceph_cluster_user,
186 | secret=ceph_cluster_secret,
187 | )
188 |             conn.storagePoolCreateXML(etree.tostring(poolxml).decode(), 0)
189 | rbdpool = conn.storagePoolLookupByName(ceph_cluster_name)
190 |
191 | if not additional_disks:
192 | additional_disks = 0
193 | for rbdnum in range(1 + additional_disks, additional_disks + rbd_disks + 1):
194 | rbdnum += 1
195 | rbdname = '{name}-{rbdnum}'.format(name=args.name, rbdnum=rbdnum)
196 | rbdxml = template.rbd_volume(
197 | name=rbdname,
198 | capacity=rbd_disks_size,
199 | pool=ceph_cluster_pool,
200 | )
201 |             rbd_disks_key.append(rbdpool.createXML(etree.tostring(rbdxml).decode(), flags=0).key())
202 | if not rbd_disks_key:
203 | rbd_disks_key = None
204 |
205 | ram = meta_data.get('downburst', {}).get('ram')
206 | ram = dehumanize.parse(ram)
207 | cpus = meta_data.get('downburst', {}).get('cpus')
208 | networks = meta_data.get('downburst', {}).get('networks')
209 | domainxml = template.domain(
210 | name=args.name,
211 | disk_key=clone.key(),
212 | iso_key=iso_vol.key(),
213 | ram=ram,
214 | cpus=cpus,
215 | networks=networks,
216 | additional_disks_key=additional_disks_key,
217 | rbd_disks_key=rbd_disks_key,
218 | rbd_details=rbd_details,
219 | hypervisor=args.hypervisor,
220 | emulator=emulator_path,
221 | )
222 | dom = conn.defineXML(etree.tostring(domainxml).decode())
223 | dom.create()
224 | if distro_id() == 'darwin':
225 | syslog_message = f'Created guest: {args.name} on {args.connect}'
226 | else:
227 | try:
228 | env = os.environ
229 | pid = os.getpid()
230 |             # os.getppid() won't return the correct value:
231 | stat_path = '/proc/{pid}/stat'.format(pid=pid)
232 | ppid = open(stat_path).read().split()[3]
233 | cmdline_path = '/proc/{ppid}/cmdline'.format(ppid=ppid)
234 | ppcmdline = open(cmdline_path).read().split('\x00')
235 |
236 | except (IndexError, IOError):
237 |             log.exception('Something went wrong getting PPID/cmdline info')
238 |             ppcmdline = 'ERROR_RETRIEVING'
239 |
240 | syslog_message = 'Created guest: {name} on {host} by User: {username} PPCMD: {pcmd}'.format(
241 | name=args.name,
242 | host=args.connect,
243 | username=env.get('USER'),
244 | pcmd=ppcmdline)
245 | syslog.syslog(syslog.LOG_ERR, syslog_message)
246 |
247 | if args.wait:
248 | log.debug('Waiting for vm to be initialized...')
249 | wait.wait_for_cdrom_eject(dom)
250 |
251 |
252 | def make(parser):
253 | """
254 | Create an Ubuntu Cloud Image vm
255 | """
256 | parser.add_argument(
257 | '--user-data',
258 | metavar='FILE',
259 | action='append',
260 | help='extra user-data, a cloud-config-archive or arbitrary file',
261 | )
262 | parser.add_argument(
263 | '--meta-data',
264 | metavar='FILE',
265 | action='append',
266 | help='extra meta-data, must contain a yaml mapping',
267 | )
268 | parser.add_argument(
269 | '--wait',
270 | action='store_true',
271 | help='wait for VM to initialize',
272 | )
273 | parser.add_argument(
274 | '--distro',
275 | metavar='DISTRO',
276 | help='Distribution of the vm',
277 | )
278 | parser.add_argument(
279 | '--distroversion',
280 | metavar='DISTROVERSION',
281 | help='Distribution version of the vm',
282 | )
283 | parser.add_argument(
284 | '--nokey',
285 | action='store_true',
286 |         help='Do not add the default ssh key (from Inktank teuthology) to authorized_keys. Should be used for non-Inktank machines',
287 | )
288 | parser.add_argument(
289 | '--forcenew',
290 | action='store_true',
291 |         help='Even if the cloud-init image already exists, force an attempt to download the newest available image',
292 | )
293 | parser.add_argument(
294 | '--arch',
295 | metavar='arch',
296 | help='Architecture of the vm (amd64/i386)',
297 | )
298 | parser.add_argument(
299 | '--hypervisor',
300 | metavar='HYPERVISOR',
301 |         help='The hypervisor used (kvm)')
302 | parser.add_argument(
303 | 'name',
304 | metavar='NAME',
305 | help='unique name to give to the vm',
306 | # TODO check valid syntax for hostname
307 | )
308 | parser.set_defaults(
309 | func=create,
310 | distro=[],
311 | distroversion=[],
312 | user_data=[],
313 | meta_data=[],
314 | hypervisor='kvm',
315 | )
316 |
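317 | # Example invocation (a sketch; the libvirt connection URI, args.connect,
318 | # is set by the top-level CLI rather than here, so only the options
319 | # declared above are shown):
320 | #
321 | #   downburst create --distro ubuntu --distroversion 22.04 \
322 | #       --user-data doc/examples/fixed-password.user.yaml --wait mynode
323 | #
324 | # This clones the cloud image into the 'default' (or guest-named) storage
325 | # pool, attaches a cloud-init seed ISO, defines and starts the domain,
326 | # and blocks until the guest ejects the cdrom.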
--------------------------------------------------------------------------------
/downburst/discover.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import json
3 | import logging
4 | import os
5 | import re
6 | import requests
7 |
8 | from html.parser import HTMLParser
9 |
10 | log = logging.getLogger(__name__)
11 |
12 | URL=os.environ.get("DOWNBURST_DISCOVER_URL", "http://download.ceph.com/cloudinit/")
13 |
14 | class Parser(HTMLParser):
15 | def __init__(self):
16 | self.filenames = []
17 | HTMLParser.__init__(self)
18 |
19 | def handle_starttag(self, tag, attrs):
20 | if tag == 'a':
21 | for key, val in attrs:
22 | if key == 'href' and (val.endswith('.img') or val.endswith('.raw')):
23 | self.filenames.append(val)
24 |
25 | class RockyImageParser(HTMLParser):
26 | def __init__(self):
27 | self.urls = []
28 | HTMLParser.__init__(self)
29 |
30 | def handle_starttag(self, tag, attrs):
31 | if tag == 'a':
32 | for key, val in attrs:
33 | if key == 'href' and val.endswith('.qcow2') and 'GenericCloud' in val:
34 | self.urls.append(val)
35 |
36 | class OpenSUSEImageParser(HTMLParser):
37 | def __init__(self):
38 | self.urls = []
39 | HTMLParser.__init__(self)
40 |
41 | def handle_starttag(self, tag, attrs):
42 | if tag == 'a':
43 | for key, val in attrs:
44 | if key == 'href' and val.endswith('.qcow2') and 'Cloud' in val:
45 | self.urls.append(val)
46 |
47 |
48 | class CentOSImageParser(HTMLParser):
49 | def __init__(self):
50 | self.urls = []
51 | HTMLParser.__init__(self)
52 |
53 | def handle_starttag(self, tag, attrs):
54 | if tag == 'a':
55 | for key, val in attrs:
56 | if key == 'href' and val.endswith('.qcow2') and 'GenericCloud' in val:
57 | self.urls.append(val)
58 |
59 |
60 | class FedoraImageParser(HTMLParser):
61 | def __init__(self):
62 | self.urls = []
63 | HTMLParser.__init__(self)
64 |
65 | def handle_starttag(self, tag, attrs):
66 | if tag == 'a':
67 | for key, val in attrs:
68 | if key == 'href' and val.endswith('.qcow2') and 'Cloud-Base-Generic' in val:
69 | self.urls.append(val)
70 |
71 |
72 | class FedoraChecksumFileParser(HTMLParser):
73 | def __init__(self):
74 | self.urls = []
75 | HTMLParser.__init__(self)
76 |
77 | def handle_starttag(self, tag, attrs):
78 | if tag == 'a':
79 | for key, val in attrs:
80 | if key == 'href' and val.endswith('-CHECKSUM'):
81 | self.urls.append(val)
82 |
83 |
84 | class ReleaseParser(HTMLParser):
85 | def __init__(self):
86 | self.dirs = []
87 | HTMLParser.__init__(self)
88 |
89 | def handle_starttag(self, tag, attrs):
90 | if tag == 'a':
91 | for key, val in attrs:
92 | if key == 'href' and val.startswith('release-'):
93 | self.dirs.append(val.rstrip('/'))
94 |
95 | class UbuntuVersionParser(HTMLParser):
96 | def __init__(self):
97 | self.versions = []
98 | HTMLParser.__init__(self)
99 |
100 | def handle_starttag(self, tag, attrs):
101 | if tag == 'a':
102 | r = re.compile(r'^([0-9]+\.[0-9]+(?:\.[0-9]+)?)/')
103 | for key, val in attrs:
104 | if key == 'href':
105 | res = r.search(val)
106 | if res:
107 | ver = res.group(1)
108 | (major, minor) = ver.split('.')[:2]
109 |                             # Skip releases earlier than 18.04; they are not of interest
110 | if int(major) < 18:
111 | continue
112 | self.versions.append(val.rstrip('/'))
113 |
114 | class RockyVersionParser(HTMLParser):
115 | def __init__(self):
116 | self.versions = []
117 | HTMLParser.__init__(self)
118 |
119 | def handle_starttag(self, tag, attrs):
120 | if tag == 'a':
121 | r = re.compile(r'^([0-9]+\.[0-9]+)/')
122 | for key, val in attrs:
123 | if key == 'href':
124 | res = r.search(val)
125 | if res:
126 | ver = res.group(1)
127 | (major, minor) = ver.split('.')[:2]
128 | # Skip versions before 8.10 and 9.5 because images are removed
129 | if int(major) == 8 and int(minor) < 10:
130 | continue
131 | if int(major) == 9 and int(minor) < 5:
132 | continue
133 | self.versions.append(val.rstrip('/'))
134 |
135 | class OpenSUSEVersionParser(HTMLParser):
136 | def __init__(self):
137 | self.versions = []
138 | HTMLParser.__init__(self)
139 |
140 | def handle_starttag(self, tag, attrs):
141 | if tag == 'a':
142 | r = re.compile(r'^\./([0-9]{2}\.[0-9])/')
143 | for key, val in attrs:
144 | if key == 'href':
145 | res = r.search(val)
146 | if res:
147 | ver = res.group(1)
148 | (major, minor) = ver.split('.')
149 | # Skip 16.0 because there is no kvm cloud image released yet
150 | if int(major) == 16:
151 | continue
152 | # Skip versions before 15.5 because of low interest
153 | if int(major) == 15 and int(minor) < 5:
154 | continue
155 | # Skip all 42.x because discontinued
156 | if int(major) == 42:
157 | continue
158 | self.versions.append(ver)
159 |
160 | class CentOSVersionParser(HTMLParser):
161 | def __init__(self):
162 | self.versions = []
163 | HTMLParser.__init__(self)
164 | def handle_starttag(self, tag, attrs):
165 | if tag == 'a':
166 | r = re.compile(r'^([0-9]{1,2})-stream/')
167 | for key, val in attrs:
168 | if key == 'href':
169 | res = r.search(val)
170 | if res:
171 | ver = res.group(1)
172 |                             # Skip everything before version 9
173 | if int(ver) < 9:
174 | continue
175 | self.versions.append(f'{ver}.stream')
176 |
177 | class FedoraVersionParser(HTMLParser):
178 | def __init__(self):
179 | self.versions = []
180 | HTMLParser.__init__(self)
181 | def handle_starttag(self, tag, attrs):
182 | if tag == 'a':
183 | r = re.compile(r'^([0-9]{1,2})/')
184 | for key, val in attrs:
185 | if key == 'href':
186 | res = r.search(val)
187 | if res:
188 | ver = res.group(1)
189 | # Skip everything before 41 version
190 | if int(ver) < 41:
191 | continue
192 | self.versions.append(f'{ver}')
193 |
194 | class DistroHandler:
195 | def get_releases(self) -> dict[str, str]:
196 | log.error(f"Method 'get_releases' is undefined for class {self.__class__.__name__}")
197 | return {}
198 |
199 | class UbuntuHandler(DistroHandler):
200 | URL = 'http://cloud-images.ubuntu.com'
201 |
202 | VERSION_TO_RELEASE = {
203 | '4.10': 'warty',
204 |         '5.04': 'hoary',
205 | '5.10': 'breezy',
206 | '6.06': 'dapper',
207 | '6.10': 'edgy',
208 | '7.04': 'feisty',
209 | '7.10': 'gutsy',
210 | '8.04': 'hardy',
211 | '8.10': 'intrepid',
212 | '9.04': 'jaunty',
213 | '9.10': 'karmic',
214 | '10.04': 'lucid',
215 | '10.10': 'maverick',
216 | '11.04': 'natty',
217 | '11.10': 'oneiric',
218 | '12.04': 'precise',
219 | '12.10': 'quantal',
220 | '13.04': 'raring',
221 | '13.10': 'saucy',
222 | '14.04': 'trusty',
223 | '14.10': 'utopic',
224 | '15.04': 'vivid',
225 | '15.10': 'wily',
226 | '16.04': 'xenial',
227 | '18.04': 'bionic',
228 | '20.04': 'focal',
229 | '20.10': 'groovy',
230 | '21.04': 'hirsute',
231 | '21.10': 'impish',
232 | '22.04': 'jammy',
233 | '22.10': 'kinetic',
234 | '23.04': 'lunar',
235 | '23.10': 'mantic',
236 | '24.04': 'noble',
237 | '24.10': 'oracular',
238 | }
239 |
240 | RELEASE_TO_VERSION = {v:k for k, v in VERSION_TO_RELEASE.items()}
241 |
242 |
243 |     def get_release(self, distroversion):
244 |         if "." in distroversion:
245 |             version = distroversion.split('.', 1)
246 |             major = version[0]
247 |             minor = version[1].split('.', 1)[0]
248 |             # fall back to the input if the version is unknown
249 |             return self.VERSION_TO_RELEASE.get(major + "." + minor, distroversion)
250 |         # already a release codename (e.g. 'jammy'); pass it through
251 |         return distroversion
252 |
253 | def get_version(self, distroversion):
254 | try:
255 | return self.RELEASE_TO_VERSION[distroversion]
256 | except KeyError:
257 | pass
258 | return distroversion
259 |
260 | def get_latest_release_serial(self, release):
261 | url = self.URL + f"/releases/{release}"
262 | r = requests.get(url)
263 | r.raise_for_status()
264 | parser = ReleaseParser()
265 | parser.feed(r.content.decode())
266 | parser.close()
267 | latest_release_directory = sorted(parser.dirs)[-1]
268 | if latest_release_directory:
269 | serial = latest_release_directory.split('-')[1]
270 | return serial, 'release'
271 |
272 | raise NameError('Image not found on server at ' + url)
273 |
274 | def get_releases(self) -> dict[str, str]:
275 |         """
276 |         Return a mapping of Ubuntu version number to release codename.
277 |         """
278 | url = f"{self.URL}/releases/"
279 |         log.debug(f"Looking up Ubuntu releases at {url}")
280 | r = requests.get(url)
281 | r.raise_for_status()
282 | parser = UbuntuVersionParser()
283 | parser.feed(r.content.decode())
284 | parser.close()
285 | version_release = {}
286 | for ver in parser.versions:
287 | v = ver.split(".")
288 | if len(v) > 1:
289 | major_minor = f"{v[0]}.{v[1]}"
290 | release = self.VERSION_TO_RELEASE.get(major_minor)
291 | if release:
292 | version_release[ver] = release
293 | return version_release
294 |
295 | def get_filename(self, arch, version, state):
296 | if state == 'release':
297 | state = ''
298 | else:
299 | state = '-' + state
300 | major, minor = version.split('.')[0:2]
301 |         if (int(major) >= 23 and int(minor) >= 10) or int(major) >= 24:
302 | return 'ubuntu-' + major + '.' + minor + state + '-server-cloudimg-'+ arch + '.img'
303 | elif int(major) >= 20:
304 | return 'ubuntu-' + major + '.' + minor + state + '-server-cloudimg-'+ arch + '-disk-kvm.img'
305 | else:
306 | return 'ubuntu-' + major + '.' + minor + state + '-server-cloudimg-'+ arch + '-disk1.img'
307 |
308 |
309 | def get_base_url(self, release, serial, state):
310 | stability = ''
311 | added = 0
312 | for letter in state:
313 | if letter.isdigit() and added == 0:
314 | added=1
315 | stability += '-' + str(letter)
316 | else:
317 | stability += str(letter)
318 |
319 | if stability == 'release':
320 | location = stability + '-' + serial
321 | else:
322 | location = stability
323 | return self.URL + '/releases/' + release + '/' + location
324 |
325 | def get_url(self, base_url, filename):
326 | return base_url + "/" + filename
327 |
328 | def get_sha256(self, base_url, filename):
329 | url = base_url + "/SHA256SUMS"
330 | r = requests.get(url)
331 | rows = csv.DictReader(r.content.decode().strip().split("\n"), delimiter=" ",
332 | fieldnames=('hash', 'file'))
333 | for row in rows:
334 | if row['file'] == "*" + filename:
335 | return row['hash']
336 | raise NameError('SHA-256 checksums not found for file ' + filename +
337 | ' at ' + url)
338 |
339 |
340 | def __call__(self, distroversion, arch):
341 | distroversion = distroversion.lower()
342 | if arch == "x86_64":
343 | arch = "amd64"
344 | if arch == "aarch64":
345 | arch = "arm64"
346 | release = self.get_release(distroversion)
347 | log.debug(f"Found release: {release}")
348 | version = self.get_version(distroversion)
349 | serial, state = self.get_latest_release_serial(release)
350 | filename = self.get_filename(arch, version, state)
351 | base_url = self.get_base_url(release, serial, state)
352 | sha256 = self.get_sha256(base_url, filename)
353 | url = self.get_url(base_url, filename)
354 |
355 | return {'url': url, 'serial': serial, 'checksum': sha256,
356 | 'hash_function': 'sha256'}
357 |
358 |
359 | class FedoraHandler(DistroHandler):
360 | URL="https://download.fedoraproject.org"
361 |
362 | def get_releases(self) -> dict[str, str]:
363 | url = f"{self.URL}/pub/fedora/linux/releases/"
364 |         log.debug(f"Looking up Fedora releases at {url}")
365 | r = requests.get(url)
366 | r.raise_for_status()
367 | parser = FedoraVersionParser()
368 | parser.feed(r.content.decode())
369 | parser.close()
370 | log.debug(f"Fedora versions: {parser.versions}")
371 | return {v:None for v in parser.versions}
372 |
373 | def get_sha256(self, base_url, filename):
374 | r = requests.get(base_url)
375 | r.raise_for_status()
376 | parser = FedoraChecksumFileParser()
377 | parser.feed(r.content.decode())
378 | parser.close()
379 | if len(parser.urls) < 1:
380 | raise RuntimeError(f"Unable to find checksum file by {base_url}")
381 | log.debug(f"Checksum files found: {parser.urls}")
382 | url = f"{base_url}/{parser.urls[0]}"
383 | r = requests.get(url)
384 | parser = re.compile(r"SHA256\s+\((.*\.qcow2)\)\s+=\s+([a-f0-9]+)$")
385 | for line in r.content.decode().strip().split("\n"):
386 | found = parser.search(line)
387 | if found:
388 | if found.group(1) == filename:
389 | return found.group(2)
390 | raise NameError('SHA-256 checksums not found for file ' + filename +
391 | ' at ' + url)
392 |
393 | def get_latest_release_image(self, url):
394 | r = requests.get(url)
395 | r.raise_for_status()
396 | parser = FedoraImageParser()
397 | parser.feed(r.content.decode())
398 | parser.close()
399 | r = re.compile(r"Cloud-Base-Generic-[0-9]+-([0-9]+\.[0-9]+)\.")
400 | for href in sorted(parser.urls, reverse=True):
401 | res = r.search(href)
402 | if res:
403 | serial=res.group(1)
404 | return href, serial
405 |
406 | raise NameError('Image not found on server at ' + url)
407 |
408 | def __call__(self, release, arch):
409 | if arch == "amd64":
410 | arch = "x86_64"
411 | if arch == "arm64":
412 | arch = "aarch64"
413 | base_url = self.URL + f"/pub/fedora/linux/releases/{release}/Cloud/{arch}/images"
414 | filename, serial = self.get_latest_release_image(base_url)
415 | log.debug(f"Found image for release '{release}': {filename} ({serial})")
416 | sha256 = self.get_sha256(base_url, filename)
417 | url = base_url + '/' + filename
418 | return {
419 | 'url': url,
420 | 'serial': serial.rstrip('.0'),
421 | 'checksum': sha256,
422 | 'hash_function': 'sha256'
423 | }
424 |
425 |
426 | class CentOSHandler(DistroHandler):
427 | URL="https://cloud.centos.org"
428 |
429 | def get_releases(self) -> dict[str, str]:
430 | url = f"{self.URL}/centos/"
431 |         log.debug(f"Looking up CentOS releases at {url}")
432 | r = requests.get(url)
433 | r.raise_for_status()
434 | parser = CentOSVersionParser()
435 | parser.feed(r.content.decode())
436 | parser.close()
437 | log.debug(f"CentOS versions: {parser.versions}")
438 | return {v:None for v in parser.versions}
439 |
440 | def get_sha256(self, base_url, filename):
441 | url = base_url + "/CHECKSUM"
442 | r = requests.get(url)
443 | parser = re.compile(r"SHA256\s+\((.*\.qcow2)\)\s+=\s+([a-f0-9]+)$")
444 | for line in r.content.decode().strip().split("\n"):
445 | found = parser.search(line)
446 | if found:
447 | if found.group(1) == filename:
448 | return found.group(2)
449 | raise NameError('SHA-256 checksums not found for file ' + filename +
450 | ' at ' + url)
451 |
452 | def get_latest_release_image(self, url):
453 | r = requests.get(url)
454 | r.raise_for_status()
455 | parser = CentOSImageParser()
456 | parser.feed(r.content.decode())
457 | parser.close()
458 | r = re.compile(r"GenericCloud-[0-9]+-([0-9]+\.[0-9]+)\.")
459 | for href in sorted(parser.urls, reverse=True):
460 | res = r.search(href)
461 | if res:
462 | serial=res.group(1)
463 | return href, serial
464 |
465 | raise NameError('Image not found on server at ' + url)
466 |
467 |     def get_release(self, distroversion):
468 |         if "." in distroversion:
469 |             version = distroversion.split('.', 1)
470 |             major = version[0]
471 |             if version[1] == 'stream':
472 |                 # e.g. '9.stream' -> '9-stream'
473 |                 return f'{major}-stream'
474 |         # anything else (e.g. an already-converted '9-stream') passes through
475 |         return distroversion
476 |
477 | def __call__(self, distroversion, arch):
478 | if arch == "amd64":
479 | arch = "x86_64"
480 | if arch == "arm64":
481 | arch = "aarch64"
482 | release = self.get_release(distroversion)
483 | base_url = self.URL + f"/centos/{release}/{arch}/images"
484 | filename, serial = self.get_latest_release_image(base_url)
485 | log.debug(f"Found image for release '{release}': {filename} ({serial})")
486 | sha256 = self.get_sha256(base_url, filename)
487 | url = base_url + '/' + filename
488 | return {
489 | 'url': url,
490 | 'serial': serial.rstrip('.0'),
491 | 'checksum': sha256,
492 | 'hash_function': 'sha256'
493 | }
494 |
495 |
496 | class AlmaHandler(DistroHandler):
497 | URL="https://repo.almalinux.org"
498 |
499 | def get_releases(self) -> dict[str, str]:
500 | url = f"{self.URL}/almalinux/"
501 |         log.debug(f"Looking up AlmaLinux releases at {url}")
502 | r = requests.get(url)
503 | r.raise_for_status()
504 | parser = RockyVersionParser()
505 | parser.feed(r.content.decode())
506 | parser.close()
507 | log.debug(f"Alma versions: {parser.versions}")
508 | return {v:None for v in parser.versions}
509 |
510 | def get_sha256(self, base_url, filename):
511 | url = base_url + "/CHECKSUM"
512 | r = requests.get(url)
513 | parser = re.compile(r"([a-f0-9]+)\s+(.*\.qcow2)$")
514 | for line in r.content.decode().strip().split("\n"):
515 | found = parser.search(line)
516 | if found:
517 | if found.group(2) == filename:
518 | return found.group(1)
519 | raise NameError('SHA-256 checksums not found for file ' + filename +
520 | ' at ' + url)
521 |
522 | def get_latest_release_image(self, url):
523 | r = requests.get(url)
524 | r.raise_for_status()
525 | parser = RockyImageParser()
526 | parser.feed(r.content.decode())
527 | parser.close()
528 | r = re.compile(r"GenericCloud-[0-9]+\.[0-9]+-([0-9]+)\.")
529 | for href in parser.urls:
530 | res = r.search(href)
531 | if res:
532 | serial=res.group(1)
533 | return href, serial
534 |
535 | raise NameError('Image not found on server at ' + url)
536 |
537 | def __call__(self, release, arch):
538 | if arch == "amd64":
539 | arch = "x86_64"
540 | if arch == "arm64":
541 | arch = "aarch64"
542 | base_url = self.URL + f"/almalinux/{release}/cloud/{arch}/images"
543 | filename, serial = self.get_latest_release_image(base_url)
544 | log.debug(f"Found image for release '{release}': {filename} ({serial})")
545 | sha256 = self.get_sha256(base_url, filename)
546 | url = base_url + '/' + filename
547 | return {
548 | 'url': url,
549 | 'serial': serial.rstrip('.0'),
550 | 'checksum': sha256,
551 | 'hash_function': 'sha256'
552 | }
553 |
554 | class RockyHandler(DistroHandler):
555 | URL="https://dl.rockylinux.org"
556 |
557 | def get_releases(self) -> dict[str, str]:
558 | url = f"{self.URL}/pub/rocky/"
559 |         log.debug(f"Looking up Rocky releases at {url}")
560 | r = requests.get(url)
561 | r.raise_for_status()
562 | parser = RockyVersionParser()
563 | parser.feed(r.content.decode())
564 | parser.close()
565 | log.debug(f"Rocky versions: {parser.versions}")
566 | return {v:None for v in parser.versions}
567 |
568 | def get_sha256(self, base_url, filename):
569 | url = base_url + "/CHECKSUM"
570 | r = requests.get(url)
571 | parser = re.compile(r"SHA256 \((.*)\) = ([a-f0-9]+)")
572 | for line in r.content.decode().strip().split("\n"):
573 | found = parser.search(line)
574 | if found:
575 | if found.group(1) == filename:
576 | return found.group(2)
577 | raise NameError('SHA-256 checksums not found for file ' + filename +
578 | ' at ' + url)
579 |
580 | def get_latest_release_image(self, url):
581 | r = requests.get(url)
582 | r.raise_for_status()
583 | parser = RockyImageParser()
584 | parser.feed(r.content.decode())
585 | parser.close()
586 | r = re.compile(r"GenericCloud-Base-[0-9]+\.[0-9]+-([0-9]+\.[0-9]+)\.")
587 | for href in sorted(parser.urls, reverse=True):
588 | res = r.search(href)
589 | if res:
590 | serial=res.group(1)
591 | return href, serial
592 |
593 | raise NameError('Image not found on server at ' + url)
594 |
595 | def __call__(self, release, arch):
596 | if arch == "amd64":
597 | arch = "x86_64"
598 | if arch == "arm64":
599 | arch = "aarch64"
600 | base_url = self.URL + f"/pub/rocky/{release}/images/{arch}"
601 | filename, serial = self.get_latest_release_image(base_url)
602 | log.debug(f"Found image for release '{release}': {filename} ({serial})")
603 | sha256 = self.get_sha256(base_url, filename)
604 | url = base_url + '/' + filename
605 | return {
606 | 'url': url,
607 | 'serial': serial.rstrip('.0'),
608 | 'checksum': sha256,
609 | 'hash_function': 'sha256'
610 | }
611 |
612 | class OpenSUSEHandler(DistroHandler):
613 | URL="https://download.opensuse.org"
614 |
615 | def get_releases(self) -> dict[str, str]:
616 | url = f"{self.URL}/distribution/leap/"
617 |         log.debug(f"Looking up openSUSE Leap releases at {url}")
618 | r = requests.get(url)
619 | r.raise_for_status()
620 | parser = OpenSUSEVersionParser()
621 | parser.feed(r.content.decode())
622 | parser.close()
623 | log.debug(f"openSUSE versions: {parser.versions}")
624 | releases={v: 'leap' for v in parser.versions}
625 | releases['1.0'] = 'tumbleweed'
626 | return releases
627 |
628 | def get_sha256(self, base_url, filename):
629 | url = f"{base_url}/{filename}.sha256"
630 | r = requests.get(url)
631 | for line in r.content.decode().strip().split("\n"):
632 | if filename in line:
633 | sha256, f = re.split(r"\s+", line)
634 | return sha256
635 |         raise NameError('SHA-256 checksums not found for file ' + filename +
636 | ' at ' + url)
637 |
638 | def get_latest_leap_image(self, url, arch):
639 | r = requests.get(url)
640 | r.raise_for_status()
641 | parser = OpenSUSEImageParser()
642 | parser.feed(r.content.decode())
643 | parser.close()
644 | r = re.compile(r"Cloud-Build([0-9]+\.[0-9]+)\.qcow2$")
645 | for href in parser.urls:
646 | res = r.search(href)
647 | if res and arch in href:
648 | serial=res.group(1)
649 | return href.lstrip('./'), serial
650 | raise NameError('Image not found on server at ' + url)
651 |
652 | def get_latest_tumbleweed_image(self, url, arch):
653 | r = requests.get(url)
654 | r.raise_for_status()
655 | parser = OpenSUSEImageParser()
656 | parser.feed(r.content.decode())
657 | parser.close()
658 | r = re.compile(r"[0-9]+\.[0-9]+\.[0-9]-Cloud-Snapshot([0-9]+)\.qcow2$")
659 | for href in parser.urls:
660 | res = r.search(href)
661 | if res and arch in href and 'Minimal-VM' in href:
662 | serial=res.group(1)
663 | return href.lstrip('./'), serial
664 | raise NameError('Image not found on server at ' + url)
665 |
666 | def __call__(self, release, arch):
667 | if arch == "amd64":
668 | arch = "x86_64"
669 | if arch == "arm64":
670 | arch = "aarch64"
671 | if release == '1.0' or release == '1.0.0':
672 | base_url = self.URL + f"/tumbleweed/appliances"
673 | if arch == "aarch64":
674 | base_url = self.URL + f"/ports/{arch}/tumbleweed/appliances"
675 | filename, serial = self.get_latest_tumbleweed_image(base_url, arch)
676 | else:
677 | base_url = self.URL + f"/distribution/leap/{release}/appliances"
678 | filename, serial = self.get_latest_leap_image(base_url, arch)
679 | log.debug(f"Found image for release '{release}': {filename} ({serial})")
680 | sha256 = self.get_sha256(base_url, filename)
681 | url = base_url + '/' + filename
682 | return {
683 | 'url': url,
684 | 'serial': serial,
685 | 'checksum': sha256,
686 | 'hash_function': 'sha256'
687 | }
688 |
689 |
690 | HANDLERS = {
691 | 'ubuntu': UbuntuHandler(),
692 | 'opensuse': OpenSUSEHandler(),
693 | 'alma': AlmaHandler(),
694 | 'fedora': FedoraHandler(),
695 | 'rocky': RockyHandler(),
696 | 'centos': CentOSHandler(),
697 | }
698 |
699 | def get(distro, distroversion, arch):
700 | if distro in HANDLERS:
701 | handler = HANDLERS[distro]
702 | return handler(distroversion, arch)
703 | r = requests.get(URL)
704 | r.raise_for_status()
705 | parser = Parser()
706 | parser.feed(r.content.decode())
707 | parser.close()
708 | list = parser.filenames
709 | imageprefix = distro + '-' + distroversion + r'-(\d+)'
710 | imagesuffix = '-cloudimg-' + arch + '.(img|raw)'
711 | imagestring = imageprefix + imagesuffix
712 | file = search(imagestring=imagestring, list=list)
713 | if file is not False:
714 | sha512 = requests.get(URL + file + ".sha512")
715 | sha512.raise_for_status()
716 | returndict = {}
717 |         returndict['url'] = URL + file
718 | returndict['serial'] = file.split('-')[2]
719 | returndict['checksum'] = sha512.content.decode().rstrip()
720 | returndict['hash_function'] = 'sha512'
721 | return returndict
722 | else:
723 | raise NameError('Image %s not found on server at %s' % (imagestring, URL))
724 |
725 | def get_distro(args):
726 |
727 | arch = "amd64"
728 | if args.arch:
729 | arch = args.arch
730 |
731 | if arch == "x86_64":
732 | arch = "amd64"
733 |
734 |     if arch == "aarch64":
735 |         arch = "arm64"
736 |
737 | d=get(args.distro, args.distroversion, arch)
738 | print(f'{d}')
739 |
740 | def add_distro(distro, version, distro_and_versions, codename=None):
741 | # Create dict entry for Distro, append if exists.
742 | if codename:
743 | version = '{version}({codename})'.format(version=version, codename=codename)
744 | try:
745 | distro_and_versions[distro].append(version)
746 | except KeyError:
747 | distro_and_versions[distro] = [version]
748 |
749 | def get_distro_list():
750 | distro_and_versions = {}
751 |
752 | # Non ubuntu distro's
753 |     log.debug(f"Looking up images at {URL}")
754 | r = requests.get(URL)
755 | r.raise_for_status()
756 |
757 | # Pull .img filenames from HTML:
758 | parser = Parser()
759 | parser.feed(r.content.decode())
760 | parser.close()
761 | for entry in parser.filenames:
762 | distro,_ = entry.split('-', 1)
763 |         # Ignore distros that have a dedicated handler (we don't pull those from ceph.com)
764 |         if distro not in HANDLERS:
765 |
766 | #Ignore sha512 files
767 | if 'sha512' not in entry:
768 | if entry.endswith('.img') or entry.endswith('.raw'):
769 |
770 | # Pull Distro and Version values from Filenames
771 | version = '-'.join(re.split('[0-9]{8}', entry)[0].strip('-').split('-')[1:])
772 | add_distro(distro, str(version), distro_and_versions)
773 |
774 | for distro, handler in HANDLERS.items():
775 | for version, codename in handler.get_releases().items():
776 | add_distro(distro, version, distro_and_versions, codename)
777 |
778 | return distro_and_versions
779 |
780 | def make(parser):
781 | """
782 | Print Available Distributions and Versions.
783 | """
784 | parser.set_defaults(func=print_distros)
785 |
786 | def make_json(parser):
787 | """
788 | Get json formatted distro and version information.
789 | """
790 | parser.set_defaults(func=print_json)
791 |
792 | def make_lookup(parser):
793 | """
794 | Lookup which image is available for the os
795 | """
796 | parser.add_argument(
797 | '--distro',
798 | metavar='DISTRO',
799 | help='Distribution of the image, use "downburst list" to see available',
800 | )
801 | parser.add_argument(
802 | '--distroversion',
803 | metavar='DISTROVERSION',
804 | help='Distribution version of the image, call "downburst list" to see available',
805 | )
806 | parser.add_argument(
807 | '--arch',
808 | metavar='arch',
809 | help='Architecture of the vm (amd64/arm64)',
810 | )
811 |
812 | parser.set_defaults(
813 | func=get_distro,
814 | distro=[],
815 | distroversion=[],
816 | )
817 |
818 | def print_json(parser):
819 | print(json.dumps(get_distro_list()))
820 | return
821 |
822 | def print_distros(parser):
823 |     distro_and_versions = get_distro_list()
824 |     for distro in sorted(distro_and_versions):
825 |         version = distro_and_versions[distro]
826 |         print('{distro}: \t {version}'.format(distro=distro, version=version))
827 | return
828 |
829 | def search(imagestring, list):
830 | for imagename in list:
831 | if re.match(imagestring, imagename):
832 | return imagename
833 | return False
834 |
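835 | # Example (sketch): resolving the newest published image for a distro.
836 | # The concrete url/serial/checksum depend on what the mirror serves at
837 | # the time, so only the shape of the result is shown:
838 | #
839 | #   >>> get('centos', '9.stream', 'x86_64')
840 | #   {'url': 'https://cloud.centos.org/centos/9-stream/x86_64/images/...',
841 | #    'serial': '...', 'checksum': '...', 'hash_function': 'sha256'}
842 | #
843 | # ``downburst list`` (print_distros above) formats the mapping returned by
844 | # get_distro_list(), which merges the ceph.com index with each handler's
845 | # get_releases().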
--------------------------------------------------------------------------------