├── scripts
│   ├── __init__.py
│   ├── test
│   │   ├── test_lock.py
│   │   ├── test_suite.py
│   │   ├── test_report.py
│   │   ├── test_results.py
│   │   ├── test_exporter_.py
│   │   ├── test_schedule.py
│   │   ├── test_dispatcher_.py
│   │   ├── test_prune_logs.py
│   │   ├── test_supervisor_.py
│   │   ├── test_ls.py
│   │   ├── script.py
│   │   ├── test_updatekeys.py
│   │   └── test_run.py
│   ├── ls.py
│   ├── exporter.py
│   ├── wait.py
│   ├── updatekeys.py
│   ├── reimage.py
│   ├── results.py
│   ├── supervisor.py
│   ├── queue.py
│   ├── update_inventory.py
│   ├── prune_logs.py
│   ├── run.py
│   ├── kill.py
│   ├── report.py
│   ├── dispatcher.py
│   └── node_cleanup.py
├── teuthology
│   ├── lock
│   │   ├── __init__.py
│   │   └── test
│   │       ├── __init__.py
│   │       └── test_lock.py
│   ├── util
│   │   ├── __init__.py
│   │   ├── compat.py
│   │   ├── loggerfile.py
│   │   ├── strtobool.py
│   │   ├── flock.py
│   │   ├── test
│   │   │   ├── files
│   │   │   │   ├── test_unit_test.xml
│   │   │   │   └── test_valgrind.xml
│   │   │   └── test_time.py
│   │   ├── sentry.py
│   │   ├── time.py
│   │   └── version.py
│   ├── orchestra
│   │   ├── __init__.py
│   │   ├── test
│   │   │   ├── __init__.py
│   │   │   ├── integration
│   │   │   │   └── __init__.py
│   │   │   ├── util.py
│   │   │   ├── files
│   │   │   │   └── daemon-systemdstate-pid-ps-ef.output
│   │   │   └── test_systemd.py
│   │   ├── daemon
│   │   │   └── __init__.py
│   │   └── monkey.py
│   ├── openstack
│   │   ├── test
│   │   │   ├── __init__.py
│   │   │   ├── suites
│   │   │   │   ├── noop
│   │   │   │   │   ├── +
│   │   │   │   │   └── noop.yaml
│   │   │   │   └── nuke
│   │   │   │       └── +
│   │   │   ├── stop_worker.yaml
│   │   │   ├── archive-on-error.yaml
│   │   │   ├── noop.yaml
│   │   │   ├── user-data-test1.txt
│   │   │   ├── resources_hint_no_cinder.yaml
│   │   │   ├── resources_hint.yaml
│   │   │   └── test_config.py
│   │   ├── openstack-teuthology.cron
│   │   ├── openstack-centos-7.3-user-data.txt
│   │   ├── openstack-debian-7.0-user-data.txt
│   │   ├── openstack-opensuse-15.1-user-data.txt
│   │   ├── openstack-buildpackages.yaml
│   │   ├── openstack-basic.yaml
│   │   ├── archive-key.pub
│   │   ├── openstack-centos-9.stream-user-data.txt
│   │   ├── bootstrap-teuthology.sh
│   │   ├── openstack-ubuntu-14.04-user-data.txt
│   │   ├── openstack-ubuntu-16.04-user-data.txt
│   │   ├── openstack-centos-7.0-user-data.txt
│   │   ├── openstack-centos-7.1-user-data.txt
│   │   ├── openstack-centos-7.2-user-data.txt
│   │   ├── openstack-ubuntu-12.04-user-data.txt
│   │   ├── openstack-user-data.txt
│   │   ├── openstack-debian-8.0-user-data.txt
│   │   ├── openstack-centos-6.5-user-data.txt
│   │   ├── openstack-sle-12.1-user-data.txt
│   │   ├── openstack-sle-12.3-user-data.txt
│   │   ├── openstack-sle-15.0-user-data.txt
│   │   ├── archive-key
│   │   ├── openstack-opensuse-15.0-user-data.txt
│   │   ├── openstack-sle-12.2-user-data.txt
│   │   ├── openstack-opensuse-42.1-user-data.txt
│   │   ├── openstack-opensuse-42.3-user-data.txt
│   │   ├── openstack-opensuse-42.2-user-data.txt
│   │   └── openstack-sle-15.1-user-data.txt
│   ├── test
│   │   ├── integration
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── test_imports.py
│   │   ├── test_get_multi_machine_types.py
│   │   ├── test_parallel.py
│   │   ├── task
│   │   │   ├── test_selinux.py
│   │   │   └── test_internal.py
│   │   ├── test_schedule.py
│   │   ├── test_get_distro.py
│   │   ├── test_safepath.py
│   │   ├── test_ls.py
│   │   ├── test_get_distro_version.py
│   │   ├── test_kill.py
│   │   └── test_job_status.py
│   ├── task
│   │   ├── buildpackages
│   │   │   ├── centos-7.0-user-data.txt
│   │   │   ├── centos-7.1-user-data.txt
│   │   │   ├── centos-7.2-user-data.txt
│   │   │   ├── centos-7.3-user-data.txt
│   │   │   ├── ubuntu-12.04-user-data.txt
│   │   │   ├── ubuntu-14.04-user-data.txt
│   │   │   ├── ubuntu-16.04-user-data.txt
│   │   │   ├── sle-12.2-user-data.txt
│   │   │   ├── sle-12.3-user-data.txt
│   │   │   ├── opensuse-42.3-user-data.txt
│   │   │   ├── user-data.txt
│   │   │   ├── debian-8.0-user-data.txt
│   │   │   ├── opensuse-42.1-user-data.txt
│   │   │   ├── opensuse-42.2-user-data.txt
│   │   │   ├── sle-12.1-user-data.txt
│   │   │   ├── sle-15.0-user-data.txt
│   │   │   ├── centos-6.5-user-data.txt
│   │   │   └── opensuse-15.0-user-data.txt
│   │   ├── internal
│   │   │   ├── edit_sudoers.sh
│   │   │   ├── git_ignore_ssl.py
│   │   │   ├── lock_machines.py
│   │   │   ├── check_lock.py
│   │   │   └── vm_setup.py
│   │   ├── nop.py
│   │   ├── install
│   │   │   ├── bin
│   │   │   │   └── adjust-ulimits
│   │   │   └── packages.yaml
│   │   ├── pcp.j2
│   │   ├── print.py
│   │   ├── dump_ctx.py
│   │   ├── sleep.py
│   │   ├── tests
│   │   │   ├── test_locking.py
│   │   │   └── test_run.py
│   │   ├── interactive.py
│   │   ├── full_sequential.py
│   │   ├── loop.py
│   │   ├── timer.py
│   │   ├── sequential.py
│   │   ├── tasktest.py
│   │   ├── full_sequential_finally.py
│   │   ├── localdir.py
│   │   ├── args.py
│   │   ├── background_exec.py
│   │   └── parallel.py
│   ├── suite
│   │   └── test
│   │       ├── conftest.py
│   │       ├── suites
│   │       │   └── noop
│   │       │       └── noop.yaml
│   │       └── test_placeholder.py
│   ├── templates
│   │   ├── email-sleep-before-teardown.jinja2
│   │   └── rocketchat-sleep-before-teardown.jinja2
│   ├── nuke
│   │   └── __init__.py
│   ├── provision
│   │   ├── cloud
│   │   │   ├── test
│   │   │   │   ├── test_openstack_userdata_conf.yaml
│   │   │   │   └── test_cloud_init.py
│   │   │   └── __init__.py
│   │   └── test
│   │       ├── test_pelagos.py
│   │       └── test_init_provision.py
│   ├── job_status.py
│   ├── safepath.py
│   ├── reimage.py
│   ├── ls.py
│   └── exit.py
├── .coveragerc
├── .github
│   ├── CODEOWNERS
│   └── workflows
│       ├── ci.yml
│       ├── dev_container.yml
│       ├── integration.yml
│       └── dependencies.yml
├── .dockerignore
├── docs
│   ├── cephlab.png
│   ├── ChangeLog.rst
│   ├── commands
│   │   ├── teuthology.rst
│   │   ├── teuthology-ls.rst
│   │   ├── teuthology-kill.rst
│   │   ├── teuthology-lock.rst
│   │   ├── teuthology-queue.rst
│   │   ├── teuthology-report.rst
│   │   ├── teuthology-suite.rst
│   │   ├── teuthology-wait.rst
│   │   ├── teuthology-worker.rst
│   │   ├── teuthology-reimage.rst
│   │   ├── teuthology-results.rst
│   │   ├── teuthology-describe.rst
│   │   ├── teuthology-schedule.rst
│   │   ├── teuthology-openstack.rst
│   │   ├── teuthology-prune-logs.rst
│   │   ├── teuthology-updatekeys.rst
│   │   ├── teuthology-update-inventory.rst
│   │   ├── list.rst
│   │   └── teuthology-dispatcher.rst
│   ├── _themes
│   │   └── ceph
│   │       ├── theme.conf
│   │       └── static
│   │           └── font
│   │               ├── ApexSans-Book.eot
│   │               ├── ApexSans-Book.ttf
│   │               ├── ApexSans-Book.woff
│   │               ├── ApexSans-Medium.eot
│   │               ├── ApexSans-Medium.ttf
│   │               └── ApexSans-Medium.woff
│   ├── docker-compose
│   │   ├── testnode
│   │   │   ├── testnode_sudoers
│   │   │   ├── testnode_stop.sh
│   │   │   ├── testnode_start.sh
│   │   │   └── Dockerfile
│   │   ├── teuthology
│   │   │   ├── .teuthology.yaml
│   │   │   ├── containerized_node.yaml
│   │   │   ├── Dockerfile
│   │   │   └── teuthology.sh
│   │   ├── db
│   │   │   └── 01-init.sh
│   │   ├── start.sh
│   │   └── docker-compose.yml
│   ├── laptop
│   │   ├── ssh_config
│   │   ├── default-pool.xml
│   │   ├── hosts
│   │   ├── front.xml
│   │   ├── targets.sql
│   │   └── teuthology.yaml
│   ├── requirements.txt
│   ├── _static
│   │   ├── nginx_test_logs
│   │   ├── nginx_paddles
│   │   ├── nginx_pulpito
│   │   ├── worker_start.sh
│   │   └── create_nodes.py
│   ├── index.rst
│   └── exporter.rst
├── update-requirements.sh
├── MANIFEST.in
├── watch-suite.sh
├── pytest.ini
├── .gitlab-ci.yml
├── ansible.cfg
├── pyproject.toml
├── .travis.yml
├── systemd
│   ├── teuthology-exporter.service
│   └── teuthology-dispatcher@.service
├── examples
│   ├── 3node_ceph.yaml
│   ├── parallel_example.yaml
│   └── 3node_rgw.yaml
├── requirements.yml
├── beanstalk
│   └── alpine
│       └── Dockerfile
├── containers
│   └── teuthology-dev
│       ├── .teuthology.yaml
│       ├── containerized_node.yaml
│       ├── Dockerfile
│       └── teuthology.sh
├── .readthedocs.yml
├── openstack-delegate.sh
├── .gitignore
├── README.rst
├── hammer.sh
├── LICENSE
└── tox.ini
/scripts/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/lock/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/util/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/lock/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/orchestra/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/orchestra/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit = */test/*
3 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @ceph/teuthology
2 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/suites/noop/+:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/suites/nuke/+:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/test/integration/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | venv
2 | virtualenv
3 | .tox
4 |
--------------------------------------------------------------------------------
/teuthology/orchestra/test/integration/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-teuthology.cron:
--------------------------------------------------------------------------------
1 | SHELL=/bin/bash
2 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/stop_worker.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.0-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.1-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.2-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.3-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/ubuntu-12.04-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/ubuntu-14.04-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/ubuntu-16.04-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/openstack/test/archive-on-error.yaml:
--------------------------------------------------------------------------------
1 | archive-on-error: true
2 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-12.2-user-data.txt:
--------------------------------------------------------------------------------
1 | sle-12.1-user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-12.3-user-data.txt:
--------------------------------------------------------------------------------
1 | sle-12.1-user-data.txt
--------------------------------------------------------------------------------
/docs/cephlab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/HEAD/docs/cephlab.png
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/opensuse-42.3-user-data.txt:
--------------------------------------------------------------------------------
1 | opensuse-42.2-user-data.txt
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.3-user-data.txt:
--------------------------------------------------------------------------------
1 | openstack-centos-7.2-user-data.txt
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-debian-7.0-user-data.txt:
--------------------------------------------------------------------------------
1 | openstack-ubuntu-14.04-user-data.txt
--------------------------------------------------------------------------------
/update-requirements.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | pip-compile --extra=test $@ pyproject.toml
4 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-15.1-user-data.txt:
--------------------------------------------------------------------------------
1 | openstack-opensuse-15.0-user-data.txt
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.rst
2 | include requirements.txt
3 | include tox.ini
4 | include pytest.ini
5 |
--------------------------------------------------------------------------------
/docs/ChangeLog.rst:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 |
4 | 0.1.0
5 | -----
6 | * (Actual changelog coming soon)
7 |
--------------------------------------------------------------------------------
/docs/commands/teuthology.rst:
--------------------------------------------------------------------------------
1 | teuthology
2 | ==========
3 |
4 | .. program-output:: teuthology --help
5 |
--------------------------------------------------------------------------------
/teuthology/orchestra/daemon/__init__.py:
--------------------------------------------------------------------------------
1 | from teuthology.orchestra.daemon.group import DaemonGroup # noqa
2 |
--------------------------------------------------------------------------------
/docs/_themes/ceph/theme.conf:
--------------------------------------------------------------------------------
1 | [theme]
2 | inherit = basic
3 | stylesheet = nature.css
4 | pygments_style = tango
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-ls.rst:
--------------------------------------------------------------------------------
1 | teuthology-ls
2 | =============
3 |
4 | .. program-output:: teuthology-ls --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-kill.rst:
--------------------------------------------------------------------------------
1 | teuthology-kill
2 | ===============
3 |
4 | .. program-output:: teuthology-kill --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-lock.rst:
--------------------------------------------------------------------------------
1 | teuthology-lock
2 | ===============
3 |
4 | .. program-output:: teuthology-lock --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-queue.rst:
--------------------------------------------------------------------------------
1 | teuthology-queue
2 | ================
3 |
4 | .. program-output:: teuthology-queue --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-report.rst:
--------------------------------------------------------------------------------
1 | teuthology-report
2 | =================
3 |
4 | .. program-output:: teuthology-report --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-suite.rst:
--------------------------------------------------------------------------------
1 | teuthology-suite
2 | ================
3 |
4 | .. program-output:: teuthology-suite --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-wait.rst:
--------------------------------------------------------------------------------
1 | teuthology-wait
2 | ===============
3 |
4 | .. program-output:: teuthology-wait --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-worker.rst:
--------------------------------------------------------------------------------
1 | teuthology-worker
2 | =================
3 |
4 | .. program-output:: teuthology-worker --help
5 |
--------------------------------------------------------------------------------
/scripts/test/test_lock.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestLock(Script):
5 |     script_name = 'teuthology-lock'
6 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-reimage.rst:
--------------------------------------------------------------------------------
1 | teuthology-reimage
2 | ==================
3 |
4 | .. program-output:: teuthology-reimage --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-results.rst:
--------------------------------------------------------------------------------
1 | teuthology-results
2 | ==================
3 |
4 | .. program-output:: teuthology-results --help
5 |
--------------------------------------------------------------------------------
/scripts/test/test_suite.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestSuite(Script):
5 |     script_name = 'teuthology-suite'
6 |
--------------------------------------------------------------------------------
/teuthology/suite/test/conftest.py:
--------------------------------------------------------------------------------
1 | from teuthology.config import config
2 |
3 | def pytest_runtest_setup():
4 |     config.load({})
5 |
--------------------------------------------------------------------------------
/watch-suite.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | watch "pwd ; echo \`teuthology-ls . | grep -c pass\` passes ; teuthology-ls . | grep -v pass"
4 |
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-describe.rst:
--------------------------------------------------------------------------------
1 | teuthology-describe
2 | ===================
3 |
4 | .. program-output:: teuthology-describe --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-schedule.rst:
--------------------------------------------------------------------------------
1 | teuthology-schedule
2 | ===================
3 |
4 | .. program-output:: teuthology-schedule --help
5 |
--------------------------------------------------------------------------------
/scripts/test/test_report.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestReport(Script):
5 |     script_name = 'teuthology-report'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_results.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestResults(Script):
5 |     script_name = 'teuthology-results'
6 |
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Book.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/HEAD/docs/_themes/ceph/static/font/ApexSans-Book.eot
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Book.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/HEAD/docs/_themes/ceph/static/font/ApexSans-Book.ttf
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Book.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/HEAD/docs/_themes/ceph/static/font/ApexSans-Book.woff
--------------------------------------------------------------------------------
/docs/commands/teuthology-openstack.rst:
--------------------------------------------------------------------------------
1 | teuthology-openstack
2 | ====================
3 |
4 | .. program-output:: teuthology-openstack --help
5 |
--------------------------------------------------------------------------------
/scripts/test/test_exporter_.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestExporter(Script):
5 |     script_name = 'teuthology-exporter'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_schedule.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestSchedule(Script):
5 |     script_name = 'teuthology-schedule'
6 |
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Medium.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/HEAD/docs/_themes/ceph/static/font/ApexSans-Medium.eot
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Medium.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/HEAD/docs/_themes/ceph/static/font/ApexSans-Medium.ttf
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Medium.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/HEAD/docs/_themes/ceph/static/font/ApexSans-Medium.woff
--------------------------------------------------------------------------------
/docs/commands/teuthology-prune-logs.rst:
--------------------------------------------------------------------------------
1 | teuthology-prune-logs
2 | =====================
3 |
4 | .. program-output:: teuthology-prune-logs --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-updatekeys.rst:
--------------------------------------------------------------------------------
1 | teuthology-updatekeys
2 | =====================
3 |
4 | .. program-output:: teuthology-updatekeys --help
5 |
--------------------------------------------------------------------------------
/scripts/test/test_dispatcher_.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestDispatcher(Script):
5 |     script_name = 'teuthology-dispatcher'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_prune_logs.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestPruneLogs(Script):
5 |     script_name = 'teuthology-prune-logs'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_supervisor_.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
4 | class TestSupervisor(Script):
5 |     script_name = 'teuthology-supervisor'
6 |
--------------------------------------------------------------------------------
/teuthology/suite/test/suites/noop/noop.yaml:
--------------------------------------------------------------------------------
1 | roles:
2 | - - mon.a
3 |   - osd.0
4 | tasks:
5 | - exec:
6 |     mon.a:
7 |     - echo "Well done !"
8 |
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/testnode_sudoers:
--------------------------------------------------------------------------------
1 | %sudo ALL=(ALL) NOPASSWD: ALL
2 | # For ansible pipelining
3 | Defaults !requiretty
4 | Defaults visiblepw
5 |
--------------------------------------------------------------------------------
/docs/laptop/ssh_config:
--------------------------------------------------------------------------------
1 | Host target-*
2 | User ubuntu
3 | StrictHostKeyChecking no
4 | UserKnownHostsFile /dev/null
5 | LogLevel ERROR
6 |
7 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-update-inventory.rst:
--------------------------------------------------------------------------------
1 | teuthology-update-inventory
2 | ===========================
3 |
4 | .. program-output:: teuthology-update-inventory --help
5 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx >= 5.0.0 # for python 3.10
2 | sphinxcontrib-programoutput
3 | mock == 2.0.0
4 | openstacksdk == 4.5.0
5 | python-openstackclient >= 6.0.0
6 |
--------------------------------------------------------------------------------
/docs/laptop/default-pool.xml:
--------------------------------------------------------------------------------
1 | <pool type="dir">
2 |   <name>default</name>
3 |   <target>
4 |     <path>/var/lib/libvirt/images/default</path>
5 |   </target>
6 | </pool>
7 |
8 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | norecursedirs = .git build virtualenv teuthology.egg-info .tox */integration task/tests
3 | log_cli=true
4 | log_level=NOTSET
5 | addopts = -p no:cacheprovider
6 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | teuthology:
2 |   tags: [ ceph-workbench ]
3 |   script: "git clean -ffqdx ; ./bootstrap install ; unset OS_AUTH_URL ; source virtualenv/bin/activate ; pip install tox ; tox"
4 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/suites/noop/noop.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | roles:
3 | - - mon.a
4 |   - osd.0
5 | tasks:
6 | - exec:
7 |     mon.a:
8 |     - echo "Well done !"
9 |
10 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | # Store collections in this directory. This is to avoid potential compatibility
3 | # issues between differently-versioned ansible processes.
4 | collections_path = .ansible
5 |
--------------------------------------------------------------------------------
/teuthology/task/internal/edit_sudoers.sh:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 |
3 | sudo vi -e /etc/sudoers <<EOF
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 |     "setuptools>=45",
4 |     "wheel",
5 |     "setuptools_scm>=6.2",
6 | ]
7 |
8 | [tool.setuptools_scm]
9 | version_scheme = "python-simplified-semver"
--------------------------------------------------------------------------------
/teuthology/openstack/test/noop.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | machine_type: openstack
3 | os_type: ubuntu
4 | os_version: "14.04"
5 | roles:
6 | - - mon.a
7 |   - osd.0
8 | tasks:
9 | - exec:
10 |     mon.a:
11 |     - echo "Well done !"
12 |
13 |
--------------------------------------------------------------------------------
/docs/laptop/hosts:
--------------------------------------------------------------------------------
1 |
2 | # teuthology hosts used as downburst vps targets
3 | 192.168.123.100 target-00 target-00.local
4 | 192.168.123.101 target-01 target-01.local
5 | 192.168.123.102 target-02 target-02.local
6 | 192.168.123.103 target-03 target-03.local
7 |
8 |
--------------------------------------------------------------------------------
/teuthology/lock/test/test_lock.py:
--------------------------------------------------------------------------------
1 | import teuthology.lock.util
2 |
3 | class TestLock(object):
4 |
5 |     def test_locked_since_seconds(self):
6 |         node = { "locked_since": "2013-02-07 19:33:55.000000" }
7 |         assert teuthology.lock.util.locked_since_seconds(node) > 3600
8 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | env: HOME=/home/travis
2 |
3 | sudo: required
4 | dist: trusty
5 |
6 | before_install:
7 | - sudo apt-get -qq update
8 | - ./bootstrap install
9 |
10 | language: python
11 | python:
12 | - 2.7
13 |
14 | install:
15 | - pip install tox
16 |
17 | script: tox -rv
18 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-buildpackages.yaml:
--------------------------------------------------------------------------------
1 | tasks:
2 | - buildpackages:
3 |     good_machine:
4 |       disk: 100 # GB
5 |       ram: 15000 # MB
6 |       cpus: 16
7 |     min_machine:
8 |       disk: 100 # GB
9 |       ram: 8000 # MB
10 |       cpus: 1
11 |
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/.teuthology.yaml:
--------------------------------------------------------------------------------
1 | queue_host: beanstalk
2 | queue_port: 11300
3 | lock_server: http://paddles:8080
4 | results_server: http://paddles:8080
5 | results_ui_server: http://pulpito:8081/
6 | teuthology_path: /teuthology
7 | archive_base: /archive_dir
8 | reserve_machines: 0
9 | lab_domain: ''
--------------------------------------------------------------------------------
/teuthology/test/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | import sys
4 |
5 | skipif_teuthology_process = pytest.mark.skipif(
6 |     os.path.basename(sys.argv[0]) == "teuthology",
7 |     reason="Skipped because this test cannot pass when run in a teuthology " \
8 |            "process (as opposed to py.test)"
9 | )
--------------------------------------------------------------------------------
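A minimal usage sketch (not a repository file) for the skipif_teuthology_process marker defined above, assuming an ordinary py.test test module:

    from teuthology.test import skipif_teuthology_process

    @skipif_teuthology_process
    def test_runs_only_under_pytest():
        # Skipped when sys.argv[0] is the teuthology binary itself.
        assert True

--------------------------------------------------------------------------------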
/teuthology/openstack/test/user-data-test1.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | system_info:
3 |   default_user:
4 |     name: ubuntu
5 | final_message: "teuthology is up and running after $UPTIME seconds, substituted variables nworkers=NWORKERS openrc=OPENRC username=TEUTHOLOGY_USERNAME upload=UPLOAD ceph_workbench=CEPH_WORKBENCH clone=CLONE_OPENSTACK"
6 |
--------------------------------------------------------------------------------
/systemd/teuthology-exporter.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Teuthology Exporter
3 |
4 | Wants=ceph.target
5 | After=ceph.target
6 |
7 | [Service]
8 | Type=simple
9 | User=teuthworker
10 | ExecStart=/home/teuthworker/src/git.ceph.com_git_teuthology_main/virtualenv/bin/teuthology-exporter
11 | Restart=on-failure
12 | TimeoutStopSec=60
13 |
--------------------------------------------------------------------------------
/examples/3node_ceph.yaml:
--------------------------------------------------------------------------------
1 | roles:
2 | - [mon.0, mds.0, osd.0]
3 | - [mon.1, osd.1]
4 | - [mon.2, client.0]
5 |
6 | tasks:
7 | - install:
8 | - ceph:
9 | - kclient: [client.0]
10 | - interactive:
11 |
12 | targets:
13 |   ubuntu@: ssh-rsa
14 |   ubuntu@: ssh-rsa
15 |   ubuntu@: ssh-rsa
16 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-dispatcher.rst:
--------------------------------------------------------------------------------
1 | teuthology-dispatcher
2 | =====================
3 |
4 | .. program-output:: teuthology-dispatcher --help
5 |
6 | Troubleshooting notes
7 | =====================
8 |
9 | - GitHub unreachable kills the dispatcher: the dispatcher may be killed when GitHub becomes unreachable; see e.g. https://tracker.ceph.com/issues/54366
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/testnode_stop.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | set -x
3 | hostname=$(hostname)
4 | payload="{\"name\": \"$hostname\", \"machine_type\": \"testnode\", \"up\": false}"
5 | for i in $(seq 1 5); do
6 | echo "attempt $i"
7 | curl -s -f -X PUT -d "$payload" http://paddles:8080/nodes/$hostname/ && break
8 | sleep 1
9 | done
10 |
--------------------------------------------------------------------------------
/requirements.yml:
--------------------------------------------------------------------------------
1 | ---
2 | collections:
3 | - amazon.aws
4 | - name: ansible.netcommon
5 |   version: "<6.0.0" # 6.0 requires ansible-core >= 2.14
6 | - ansible.posix
7 | - name: ansible.utils
8 |   version: "<3.0.0" # 3.0 requires ansible-core >= 2.14
9 | - community.docker
10 | - community.general
11 | - community.postgresql
12 |
13 |
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/containerized_node.yaml:
--------------------------------------------------------------------------------
1 | overrides:
2 |   ansible.cephlab:
3 |     skip_tags: "timezone,nagios,monitoring-scripts,ssh,hostname,pubkeys,zap,sudoers,kerberos,selinux,lvm,ntp-client,resolvconf,packages,cpan,nfs"
4 |     vars:
5 |       containerized_node: true
6 |       ansible_user: root
7 |       cm_user: root
8 |       start_rpcbind: false
9 |
--------------------------------------------------------------------------------
/docs/docker-compose/db/01-init.sh:
--------------------------------------------------------------------------------
1 | set -e
2 | export PGPASSWORD=$POSTGRES_PASSWORD;
3 | psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
4 | CREATE USER $APP_DB_USER WITH PASSWORD '$APP_DB_PASS';
5 | CREATE DATABASE $APP_DB_NAME;
6 | GRANT ALL PRIVILEGES ON DATABASE $APP_DB_NAME TO $APP_DB_USER;
7 | \connect $APP_DB_NAME $APP_DB_USER
8 | EOSQL
--------------------------------------------------------------------------------
/beanstalk/alpine/Dockerfile:
--------------------------------------------------------------------------------
1 | # For beanstalkd 1.12 use edge branch
2 | #FROM alpine:edge
3 |
4 | FROM alpine:3.12.3
5 |
6 | MAINTAINER Kyrylo Shatskyy
7 |
8 | RUN apk update && apk add beanstalkd beanstalkd-doc
9 |
10 | ENV BEANSTALK_ADDR "0.0.0.0"
11 | ENV BEANSTALK_PORT "11300"
12 |
13 | CMD /usr/bin/beanstalkd -V -l $BEANSTALK_ADDR -p $BEANSTALK_PORT
14 |
--------------------------------------------------------------------------------
/docs/_static/nginx_paddles:
--------------------------------------------------------------------------------
1 | server {
2 |     server_name paddles.example.com;
3 |     proxy_send_timeout 600;
4 |     proxy_connect_timeout 240;
5 |     location / {
6 |         proxy_pass http://paddles.example.com:8080/;
7 |         proxy_set_header Host $host;
8 |         proxy_set_header X-Real-IP $remote_addr;
9 |     }
10 |
11 | }
12 |
--------------------------------------------------------------------------------
/docs/_static/nginx_pulpito:
--------------------------------------------------------------------------------
1 | server {
2 |     server_name pulpito.example.com;
3 |     proxy_send_timeout 600;
4 |     proxy_connect_timeout 240;
5 |     location / {
6 |         proxy_pass http://pulpito.example.com:8081/;
7 |         proxy_set_header Host $host;
8 |         proxy_set_header X-Real-IP $remote_addr;
9 |     }
10 |
11 | }
12 |
--------------------------------------------------------------------------------
/examples/parallel_example.yaml:
--------------------------------------------------------------------------------
1 | interactive-on-error: true
2 | overrides:
3 | roles:
4 | - - test0
5 |   - test1
6 | - - test0
7 |   - test1
8 | - - test0
9 | tasks:
10 | - install:
11 | - parallel_example:
12 |   - test0
13 |   - test1
14 |
15 | targets:
16 |   ubuntu@: ssh-rsa
17 |   ubuntu@: ssh-rsa
18 |   ubuntu@: ssh-rsa
19 |
20 |
21 |
--------------------------------------------------------------------------------
/teuthology/task/install/bin/adjust-ulimits:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # If we're running as root, allow large amounts of open files.
3 | USER=$(whoami)
4 |
5 | # If a ulimit call fails, exit immediately.
6 | set -e
7 |
8 | if [ "$USER" = "root" ]
9 | then
10 | # Enable large number of open files
11 | ulimit -n 65536
12 | fi
13 |
14 | # Enable core dumps for everything
15 | ulimit -c unlimited
16 | exec "$@"
17 |
--------------------------------------------------------------------------------
/scripts/test/test_ls.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | from script import Script
4 | from scripts import ls
5 |
6 | doc = ls.__doc__
7 |
8 |
9 | class TestLs(Script):
10 |     script_name = 'teuthology-ls'
11 |
12 |     def test_args(self):
13 |         args = docopt.docopt(doc, ["--verbose", "some/archive/dir"])
14 |         assert args["--verbose"]
15 |         assert args["<archive_dir>"] == "some/archive/dir"
16 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-basic.yaml:
--------------------------------------------------------------------------------
1 | overrides:
2 |   ceph:
3 |     conf:
4 |       global:
5 |         osd heartbeat grace: 100
6 |         # this line to address issue #1017
7 |         mon lease: 15
8 |         mon lease ack timeout: 25
9 |   s3tests:
10 |     idle_timeout: 1200
11 |   ceph-fuse:
12 |     client.0:
13 |       mount_wait: 60
14 |       mount_timeout: 120
15 | archive-on-error: true
16 |
--------------------------------------------------------------------------------
/containers/teuthology-dev/.teuthology.yaml:
--------------------------------------------------------------------------------
1 | queue_host: beanstalk
2 | queue_port: 11300
3 | lock_server: http://paddles:8080
4 | results_server: http://paddles:8080
5 | results_ui_server: http://pulpito:8081/
6 | teuthology_path: /teuthology
7 | archive_base: /archive_dir
8 | reserve_machines: 0
9 | lab_domain: ''
10 |
11 | defaults:
12 |   cephadm:
13 |     containers:
14 |       image: 'quay.ceph.io/ceph-ci/ceph'
15 |
--------------------------------------------------------------------------------
/teuthology/openstack/archive-key.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8vP6xqjfYkv/IyDBMUNOdBVkrePJeWFAmkcuyDCAdYdD/bst/ARsYVViwk0Eae1nAJm8f8rhzQh69udf9jr2qCYBz1mllAshwgXQDyGbBVhFT4ClMAraoC9p1OFZ+JEEDr5IahHLpVXkxZ6r6GqUZ/1rFEqqEQI2Ee4YG4F6ixQ5k44c1XO8Uhun/jzjbJitlQOcPNbslfaRgaLqRoQBh5HypgmBP/3k+bgGC7VXswjfqvHU7/kFI8Xcu8NKedpnIcINm3DMLnSUGdLafHyQo+aZ0oFIQmhN01E8hdn+UyrmnMhZA8OMjw7mltfUElzF8WVn+H5sjq7whfe99MYdb loic@fold
2 |
--------------------------------------------------------------------------------
/teuthology/task/pcp.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | {% if job_id %}job {{ job_id }} {% endif %}performance data
4 |
5 | {% for metric in graphs.keys() %}
6 | {% if mode == 'static' %}
7 | {% set url = graphs[metric].file.split('/')[-1] %}
8 | {% else %}
9 | {% set url = graphs[metric].url %}
10 | {% endif %}
11 | {{ metric }}
12 |
13 |
14 | {% endfor %}
15 |
16 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo 'APT::Get::AllowUnauthenticated "true";' | tee /etc/apt/apt.conf.d/99disablesigs
4 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
5 | manage_etc_hosts: true
6 | preserve_hostname: true
7 | system_info:
8 |   default_user:
9 |     name: ubuntu
10 | final_message: "READYTORUN"
11 |
--------------------------------------------------------------------------------
/containers/teuthology-dev/containerized_node.yaml:
--------------------------------------------------------------------------------
1 | overrides:
2 |   ansible.cephlab:
3 |     skip_tags: "timezone,nagios,monitoring-scripts,ssh,hostname,pubkeys,zap,sudoers,kerberos,selinux,lvm,ntp-client,resolvconf,packages,cpan,nfs"
4 |     vars:
5 |       containerized_node: true
6 |       ansible_user: root
7 |       cm_user: root
8 |       start_rpcbind: false
9 |   cephadm:
10 |     osd_method: raw
11 |     no_cgroups_split: true
12 |
--------------------------------------------------------------------------------
/teuthology/templates/email-sleep-before-teardown.jinja2:
--------------------------------------------------------------------------------
1 | Teuthology job {{ run_name }}/{{ job_id }} has fallen asleep at {{ sleep_date }} for {{ sleep_time }}
2 |
3 | Owner: {{ owner }}
4 | Suite Name: {{ suite_name }}
5 | Sleep Date: {{ sleep_date }}
6 | Sleep Time: {{ sleep_time_sec }} seconds ({{ sleep_time }})
7 | Job Info: {{ job_info }}
8 | Job Logs: {{ job_logs }}
9 | Task Stack: {{ task_stack }}
10 | Current Status: {{ status }}
11 |
--------------------------------------------------------------------------------
/examples/3node_rgw.yaml:
--------------------------------------------------------------------------------
1 | interactive-on-error: true
2 | overrides:
3 |   ceph:
4 |     branch: main
5 |     fs: xfs
6 | roles:
7 | - - mon.a
8 |   - mon.c
9 |   - osd.0
10 | - - mon.b
11 |   - mds.a
12 |   - osd.1
13 | - - client.0
14 | tasks:
15 | - install:
16 | - ceph: null
17 | - rgw:
18 |   - client.0
19 | - interactive:
20 |
21 | targets:
22 |   ubuntu@: ssh-rsa
23 |   ubuntu@: ssh-rsa
24 |   ubuntu@: ssh-rsa
25 |
--------------------------------------------------------------------------------
/teuthology/templates/rocketchat-sleep-before-teardown.jinja2:
--------------------------------------------------------------------------------
1 | The teuthology job [{{ job_id }}]({{ job_info }}) for suite *{{ suite_name }}* owned by '{{ owner }}' has fallen asleep with status '{{ status }}' at {{ sleep_date }} for __{{ sleep_time }}__ ({{ sleep_time_sec }} seconds).
2 | Open [teuthology.log]({{ job_logs }}teuthology.log) for details, or go to [all logs]({{ job_logs }}).
3 |
4 | Job Description: {{ job_desc }}
5 | Run Name: {{ run_name }}
6 | Task Stack: {{ task_stack }}
7 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | version: 2
6 | formats: []
7 | build:
8 |   os: ubuntu-22.04
9 |   tools:
10 |     python: "3.10"
11 | python:
12 |   install:
13 |     - method: pip
14 |       path: .
15 |       extra_requirements:
16 |         - orchestra
17 |     - requirements: docs/requirements.txt
18 | sphinx:
19 |   builder: html
20 |   configuration: docs/conf.py
21 |
--------------------------------------------------------------------------------
/scripts/ls.py:
--------------------------------------------------------------------------------
1 | """
2 | usage: teuthology-ls [-h] [-v] <archive_dir>
3 |
4 | List teuthology job results
5 |
6 | positional arguments:
7 |   <archive_dir>      path under which to archive results
8 |
9 | optional arguments:
10 | -h, --help show this help message and exit
11 | -v, --verbose show reasons tests failed
12 | """
13 | import docopt
14 | import teuthology.ls
15 |
16 |
17 | def main():
18 |     args = docopt.docopt(__doc__)
19 |     teuthology.ls.main(args)
20 |
--------------------------------------------------------------------------------
/teuthology/orchestra/test/util.py:
--------------------------------------------------------------------------------
1 | def assert_raises(excClass, callableObj, *args, **kwargs):
2 | """
3 | Like unittest.TestCase.assertRaises, but returns the exception.
4 | """
5 | try:
6 | callableObj(*args, **kwargs)
7 | except excClass as e:
8 | return e
9 | else:
10 | if hasattr(excClass,'__name__'): excName = excClass.__name__
11 | else: excName = str(excClass)
12 | raise AssertionError("%s not raised" % excName)
13 |
--------------------------------------------------------------------------------
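A short usage sketch (not part of the repository) for assert_raises, which, unlike unittest's assertRaises, hands the caught exception back for further inspection:

    from teuthology.orchestra.test.util import assert_raises

    # int("nope") raises ValueError; assert_raises returns the exception object.
    e = assert_raises(ValueError, int, "nope")
    assert "invalid literal" in str(e)

--------------------------------------------------------------------------------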
/openstack-delegate.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | trap "rm -f teuthology-integration.pem ; openstack keypair delete teuthology-integration ; openstack server delete teuthology-integration" EXIT
4 |
5 | openstack keypair create teuthology-integration > teuthology-integration.pem
6 | chmod 600 teuthology-integration.pem
7 | teuthology-openstack --name teuthology-integration --key-filename teuthology-integration.pem --key-name teuthology-integration --suite teuthology/integration --wait --teardown --upload
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | .#*
3 | ## the next line needs to start with a backslash to avoid looking like
4 | ## a comment
5 | \#*#
6 | .*.swp
7 |
8 | *.pyc
9 | *.pyo
10 | .tox
11 |
12 | /*.egg-info
13 | /virtualenv
14 | /build
15 | /*.yaml
16 | docs/build
17 |
18 | .ropeproject
19 | .coverage
20 |
21 | # autogenerated docs from sphinx-apidoc
22 | docs/modules.rst
23 | docs/teuthology.rst
24 | docs/teuthology.*.rst
25 |
26 | # PyCharm
27 | .idea
28 |
29 | # vscode
30 | .vscode/
31 |
32 | .ansible
33 |
--------------------------------------------------------------------------------
/scripts/exporter.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.exporter
4 |
5 | doc = """
6 | usage: teuthology-exporter --help
7 | teuthology-exporter [--interval INTERVAL]
8 |
9 | optional arguments:
10 | -h, --help show this help message and exit
11 | --interval INTERVAL update metrics this often, in seconds
12 | [default: 60]
13 | """
14 |
15 |
16 | def main():
17 |     args = docopt.docopt(doc)
18 |     teuthology.exporter.main(args)
19 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/debian-8.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo 'APT::Get::AllowUnauthenticated "true";' | tee /etc/apt/apt.conf.d/99disablesigs
4 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
5 | manage_etc_hosts: true
6 | preserve_hostname: true
7 | system_info:
8 |   default_user:
9 |     name: ubuntu
10 | runcmd:
11 | - echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
12 | final_message: "READYTORUN"
13 |
--------------------------------------------------------------------------------
/scripts/test/script.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | from pytest import raises
3 |
4 |
5 | class Script(object):
6 |     script_name = 'teuthology'
7 |
8 |     def test_help(self):
9 |         args = (self.script_name, '--help')
10 |         out = subprocess.check_output(args).decode()
11 |         assert out.startswith('usage')
12 |
13 |     def test_invalid(self):
14 |         args = (self.script_name, '--invalid-option')
15 |         with raises(subprocess.CalledProcessError):
16 |             subprocess.check_call(args)
17 |
--------------------------------------------------------------------------------
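The test_*.py files in scripts/test subclass Script to smoke-test each console script's --help output and error handling. A hypothetical additional subclass (for illustration only; not a repository file) would look like:

    from script import Script

    class TestQueue(Script):  # hypothetical example mirroring the real subclasses
        script_name = 'teuthology-queue'

--------------------------------------------------------------------------------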
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Content Index
2 | =============
3 |
4 | .. toctree::
5 |    :maxdepth: 2
6 |
7 |    README.rst
8 |    intro_testers.rst
9 |    fragment_merging.rst
10 |    siteconfig.rst
11 |    detailed_test_config.rst
12 |    openstack_backend.rst
13 |    libcloud_backend.rst
14 |    downburst_vms.rst
15 |    INSTALL.rst
16 |    LAB_SETUP.rst
17 |    exporter.rst
18 |    commands/list.rst
19 |    ChangeLog.rst
20 |
21 | Indices and tables
22 | ==================
23 |
24 | * :ref:`genindex`
25 | * :ref:`modindex`
26 | * :ref:`search`
27 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/resources_hint_no_cinder.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | machine_type: openstack
3 | openstack:
4 | - machine:
5 |     disk: 10 # GB
6 |     ram: 10000 # MB
7 |     cpus: 1
8 |   volumes:
9 |     count: 0
10 |     size: 2 # GB
11 | os_type: ubuntu
12 | os_version: "14.04"
13 | roles:
14 | - - mon.a
15 |   - osd.0
16 | tasks:
17 | - exec:
18 |     mon.a:
19 |     - cat /proc/meminfo
20 |     - test $(sed -n -e 's/MemTotal.* \([0-9][0-9]*\).*/\1/p' < /proc/meminfo) -ge 10000000 && echo "RAM" "size" "ok"
21 |
--------------------------------------------------------------------------------
/teuthology/util/compat.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | PY3 = False
4 |
5 | if sys.version_info >= (3, 0):
6 |     PY3 = True
7 |
8 | if PY3:
9 |     from urllib.parse import parse_qs, urljoin, urlparse, urlencode # noqa: F401
10 |     from urllib.request import urlopen, Request # noqa: F401
11 |     from urllib.error import HTTPError # noqa: F401
12 | else:
13 |     from urlparse import parse_qs, urljoin, urlparse # noqa: F401
14 |     from urllib import urlencode # noqa: F401
15 |     from urllib2 import urlopen, Request, HTTPError # noqa: F401
16 |
17 |
--------------------------------------------------------------------------------
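A small sketch (not in the repository) of the compat shim in use; the same import line works under both Python 2 and Python 3:

    from teuthology.util.compat import urlencode, urljoin

    # Build a query URL identically on py2 and py3.
    url = urljoin("http://paddles:8080/", "nodes/") + "?" + urlencode({"up": "true"})

--------------------------------------------------------------------------------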
/README.rst:
--------------------------------------------------------------------------------
1 | ===================================================
2 | `Teuthology` -- The Ceph integration test framework
3 | ===================================================
4 |
5 |
6 | Welcome! Teuthology's documentation is primarily hosted at `docs.ceph.com
7 | <https://docs.ceph.com/projects/teuthology/>`__.
8 |
9 | You can also look at docs `inside this repository <docs/>`__, but note that
10 | GitHub's RST rendering is quite
11 | limited. Mainly that means that links between documents will be broken.
12 |
--------------------------------------------------------------------------------
/teuthology/util/loggerfile.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | class LoggerFile(object):
4 | """
5 | A thin wrapper around a logging.Logger instance that provides a file-like
6 | interface.
7 |
8 | Used by Ansible.execute_playbook() when it calls pexpect.run()
9 | """
10 | def __init__(self, logger: logging.Logger, level: int):
11 | self.logger = logger
12 | self.level = level
13 |
14 | def write(self, string):
15 | self.logger.log(self.level, string.decode('utf-8', 'ignore'))
16 |
17 | def flush(self):
18 | pass
19 |
20 |
--------------------------------------------------------------------------------
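A usage sketch (not a repository file) for LoggerFile; write() decodes its argument, so callers such as pexpect are expected to pass bytes:

    import logging

    from teuthology.util.loggerfile import LoggerFile

    logging.basicConfig(level=logging.INFO)
    out = LoggerFile(logging.getLogger("ansible"), logging.INFO)
    out.write(b"PLAY RECAP ****")  # logged at INFO level
    out.flush()                    # no-op; satisfies the file-like interface

--------------------------------------------------------------------------------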
/teuthology/task/buildpackages/opensuse-42.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 |   gecos: User
9 |   sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 |   groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | final_message: "READYTORUN"
14 |
--------------------------------------------------------------------------------
/teuthology/util/strtobool.py:
--------------------------------------------------------------------------------
1 | def strtobool(val):
2 | """Convert a string representation of truth to true (1) or false (0).
3 |
4 | True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
5 | are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
6 | 'val' is anything else.
7 | """
8 | val = val.lower()
9 | if val in ('y', 'yes', 't', 'true', 'on', '1'):
10 | return 1
11 | elif val in ('n', 'no', 'f', 'false', 'off', '0'):
12 | return 0
13 | else:
14 | raise ValueError(f"invalid truth value {val!r}")
15 |
--------------------------------------------------------------------------------
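A doctest-style sketch (not in the repository) of the mapping described in the docstring above:

    >>> from teuthology.util.strtobool import strtobool
    >>> strtobool("Yes"), strtobool("off")
    (1, 0)
    >>> strtobool("maybe")
    Traceback (most recent call last):
        ...
    ValueError: invalid truth value 'maybe'

--------------------------------------------------------------------------------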
/docs/laptop/front.xml:
--------------------------------------------------------------------------------
1 |
2 | front
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/teuthology/task/internal/git_ignore_ssl.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import logging
3 |
4 | from teuthology.orchestra import run
5 |
6 | log = logging.getLogger(__name__)
7 |
8 |
9 | @contextlib.contextmanager
10 | def git_ignore_ssl(ctx, config):
11 | """
12 | Ignore ssl error's while cloning from untrusted http
13 | """
14 |
15 | log.info("ignoring ssl errors while cloning http repo")
16 | ctx.cluster.run(
17 | args=[
18 | 'sudo', 'git', 'config', run.Raw('--system'),
19 | 'http.sslverify', 'false'
20 | ],
21 | )
22 | yield
23 |
--------------------------------------------------------------------------------
/hammer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh -ex
2 | #
3 | # simple script to repeat a test until it fails
4 | #
5 |
6 | if [ $1 = "-a" ]; then
7 | shift
8 | job=$1
9 | log="--archive $job.out"
10 | else
11 | job=$1
12 | log=""
13 | fi
14 |
15 | test -e $1
16 |
17 | title() {
18 | echo '\[\033]0;hammer '$job' '$N' passes\007\]'
19 | }
20 |
21 | N=0
22 | title
23 | [ -n "$log" ] && [ -d $job.out ] && rm -rf $job.out
24 | while teuthology $log $job $2 $3 $4
25 | do
26 | date
27 | N=$(($N+1))
28 | echo "$job: $N passes"
29 | [ -n "$log" ] && rm -rf $job.out
30 | title
31 | done
32 | echo "$job: $N passes, then failure."
33 |
--------------------------------------------------------------------------------
/systemd/teuthology-dispatcher@.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Teuthology Dispatcher
3 |
4 | Wants=ceph.target
5 | After=ceph.target
6 |
7 | [Service]
8 | Type=simple
9 | User=teuthworker
10 | ExecStart=/home/teuthworker/src/git.ceph.com_git_teuthology_main/virtualenv/bin/python3 \
11 | /home/teuthworker/src/git.ceph.com_git_teuthology_main/virtualenv/bin/teuthology-dispatcher \
12 | -v \
13 | --archive-dir /home/teuthworker/archive \
14 | --tube %i \
15 | --log-dir /home/teuthworker/archive/worker_logs
16 | ExecStop=touch /tmp/teuthology-stop-dispatcher
17 | Restart=on-failure
18 | TimeoutStopSec=infinity
19 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/opensuse-42.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - 'zypper rr openSUSE-Leap-Cloud-Tools || :'
14 | final_message: "READYTORUN"
15 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-9.stream-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - hostnamectl set-hostname $(cat /etc/hostname)
4 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
5 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
6 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
7 | preserve_hostname: true
8 | system_info:
9 | default_user:
10 | name: {username}
11 | packages:
12 | - python3
13 | - wget
14 | - git
15 | - ntp
16 | final_message: "{up}, after $UPTIME seconds"
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-12.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog lsb-release
14 | final_message: "READYTORUN"
15 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - zypper --non-interactive --no-gpg-checks install --no-recommends wget git-core rsyslog lsb-release
14 | final_message: "READYTORUN"
15 |
--------------------------------------------------------------------------------
/teuthology/task/print.py:
--------------------------------------------------------------------------------
1 | """
2 | Print task
3 |
4 | A task that logs whatever is given to it as an argument. Can be used
 5 | like any other task (under sequential, etc...).
6 |
7 | For example, the following would cause the strings "String" and "Another
8 | string" to appear in the teuthology.log before and after the chef task
9 | runs, respectively.
10 |
11 | tasks:
12 | - print: "String"
13 | - chef: null
14 | - print: "Another String"
15 | """
16 |
17 | import logging
18 |
19 | log = logging.getLogger(__name__)
20 |
21 | def task(ctx, config):
22 | """
23 | Print out config argument in teuthology log/output
24 | """
25 | log.info('{config}'.format(config=config))
26 |
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/testnode_start.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | set -x
3 | echo "$SSH_PUBKEY" > /root/.ssh/authorized_keys
4 | echo "$SSH_PUBKEY" > /home/ubuntu/.ssh/authorized_keys
5 | chown ubuntu /home/ubuntu/.ssh/authorized_keys
6 | . /etc/os-release
 7 | if [ "$ID" = "centos" ]; then
8 | VERSION_ID=${VERSION_ID}.stream
9 | fi
10 | payload="{\"name\": \"$(hostname)\", \"machine_type\": \"testnode\", \"up\": true, \"locked\": false, \"os_type\": \"${ID}\", \"os_version\": \"${VERSION_ID}\"}"
11 | for i in $(seq 1 5); do
12 | echo "attempt $i"
13 | curl -v -f -d "$payload" http://paddles:8080/nodes/ && break
14 | sleep 1
15 | done
16 | mkdir -p /run/sshd
17 | exec /usr/sbin/sshd -D
18 |
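19 | # Note: the curl loop above registers this container with paddles, retrying
20 | # up to five times before handing the process over to sshd.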
--------------------------------------------------------------------------------
/teuthology/task/dump_ctx.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import pprint
3 |
4 | log = logging.getLogger(__name__)
5 | pp = pprint.PrettyPrinter(indent=4)
6 |
7 | def _pprint_me(thing, prefix):
8 | return prefix + "\n" + pp.pformat(thing)
9 |
10 | def task(ctx, config):
11 | """
12 | Dump task context and config in teuthology log/output
13 |
14 | The intended use case is didactic - to provide an easy way for newbies, who
15 | are working on teuthology tasks for the first time, to find out what
16 | is inside the ctx and config variables that are passed to each task.
17 | """
18 | log.info(_pprint_me(ctx, "Task context:"))
19 | log.info(_pprint_me(config, "Task config:"))
20 |
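21 | # Minimal job-yaml sketch to invoke this task:
22 | #   tasks:
23 | #   - dump_ctx: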
--------------------------------------------------------------------------------
/teuthology/orchestra/test/files/daemon-systemdstate-pid-ps-ef.output:
--------------------------------------------------------------------------------
1 | ceph 658 1 0 Jun08 ? 00:07:43 /usr/bin/ceph-mgr -f --cluster ceph --id host1 --setuser ceph --setgroup ceph
2 | ceph 1634 1 0 Jun08 ? 00:02:17 /usr/bin/ceph-mds -f --cluster ceph --id host1 --setuser ceph --setgroup ceph
3 | ceph 31555 1 0 Jun08 ? 01:13:50 /usr/bin/ceph-mon -f --cluster ceph --id host1 --setuser ceph --setgroup ceph
4 | ceph 31765 1 0 Jun08 ? 00:48:42 /usr/bin/radosgw -f --cluster ceph --name client.rgw.host1.rgw0 --setuser ceph --setgroup ceph
5 | ceph 97427 1 0 Jun17 ? 00:41:39 /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph
--------------------------------------------------------------------------------
/teuthology/task/sleep.py:
--------------------------------------------------------------------------------
1 | """
2 | Sleep task
3 | """
4 | import logging
5 | import time
6 |
7 | log = logging.getLogger(__name__)
8 |
9 |
10 | def task(ctx, config):
11 | """
12 | Sleep for some number of seconds.
13 |
14 | Example::
15 |
16 |
17 | tasks:
18 | - install:
19 | - ceph:
20 | - sleep:
21 | duration: 10
22 | - interactive:
23 |
24 | :param ctx: Context
25 | :param config: Configuration
26 | """
27 | if not config:
28 | config = {}
29 | assert isinstance(config, dict)
30 | duration = int(config.get('duration', 5))
31 | log.info('Sleeping for {} seconds'.format(duration))
32 | time.sleep(duration)
33 |
--------------------------------------------------------------------------------
/docs/laptop/targets.sql:
--------------------------------------------------------------------------------
1 | begin;
2 | insert into nodes (name, machine_type, is_vm, locked, up) values ('localhost', 'libvirt', false, true, true);
3 | insert into nodes (name, machine_type, is_vm, locked, up, mac_address, vm_host_id) values
4 | ('target-00.local', 'vps', true, false, false, '52:54:00:00:00:00', (select id from nodes where name='localhost')),
5 | ('target-01.local', 'vps', true, false, false, '52:54:00:00:00:01', (select id from nodes where name='localhost')),
6 | ('target-02.local', 'vps', true, false, false, '52:54:00:00:00:02', (select id from nodes where name='localhost')),
7 | ('target-03.local', 'vps', true, false, false, '52:54:00:00:00:03', (select id from nodes where name='localhost'));
8 | commit;
9 |
--------------------------------------------------------------------------------
/teuthology/util/flock.py:
--------------------------------------------------------------------------------
1 | import fcntl
2 |
3 |
4 | class FileLock(object):
5 | def __init__(self, filename, noop=False):
6 | self.filename = filename
7 | self.file = None
8 | self.noop = noop
9 |
10 | def __enter__(self):
11 | if not self.noop:
12 | assert self.file is None
13 | self.file = open(self.filename, 'w')
14 | fcntl.lockf(self.file, fcntl.LOCK_EX)
15 | return self
16 |
17 | def __exit__(self, exc_type, exc_val, exc_tb):
18 | if not self.noop:
19 | assert self.file is not None
20 | fcntl.lockf(self.file, fcntl.LOCK_UN)
21 | self.file.close()
22 | self.file = None
23 |
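24 | # Usage sketch (hypothetical lock file path):
25 | #   with FileLock('/tmp/example.lock'):
26 | #       ...  # critical section; the lock is released on exit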
--------------------------------------------------------------------------------
/teuthology/util/test/files/test_unit_test.xml:
--------------------------------------------------------------------------------
 1 | <!-- JUnit-style XML fixture; the tag markup was lost in extraction and no
 2 |      element content survives -->
--------------------------------------------------------------------------------
/teuthology/nuke/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | log = logging.getLogger(__name__)
4 |
5 |
6 | # This is being kept because ceph.git/qa/tasks/cephfs/filesystem.py references it.
7 | def clear_firewall(ctx):
8 | """
9 | Remove any iptables rules created by teuthology. These rules are
10 | identified by containing a comment with 'teuthology' in it. Non-teuthology
11 | firewall rules are unaffected.
12 | """
13 | log.info("Clearing teuthology firewall rules...")
14 | ctx.cluster.run(
15 | args=[
16 | "sudo", "sh", "-c",
17 | "iptables-save | grep -v teuthology | iptables-restore"
18 | ],
19 | )
20 | log.info("Cleared teuthology firewall rules.")
21 |
--------------------------------------------------------------------------------
/teuthology/task/install/packages.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ceph:
3 | deb:
4 | - ceph
5 | - ceph-mds
6 | - ceph-common
7 | - ceph-fuse
8 | - ceph-test
9 | - radosgw
10 | - python3-rados
11 | - python3-rgw
12 | - python3-cephfs
13 | - python3-rbd
14 | - libcephfs2
15 | - librados2
16 | - librbd1
17 | - rbd-fuse
18 | - ceph-dbg
19 | - ceph-mds-dbg
20 | - ceph-common-dbg
21 | - ceph-fuse-dbg
22 | - radosgw-dbg
23 | - libcephfs2-dbg
24 | - librados2-dbg
25 | - librbd1-dbg
26 | rpm:
27 | - ceph-radosgw
28 | - ceph-test
29 | - ceph
30 | - ceph-fuse
31 | - libcephfs2
32 | - librados2
33 | - librbd1
34 | - python3-rados
35 | - python3-rgw
36 | - python3-cephfs
37 | - python3-rbd
38 | - rbd-fuse
39 | - ceph-debuginfo
40 |
--------------------------------------------------------------------------------
/scripts/test/test_updatekeys.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 | import subprocess
3 | from pytest import raises
4 | from pytest import skip
5 |
6 |
7 | class TestUpdatekeys(Script):
8 | script_name = 'teuthology-updatekeys'
9 |
10 | def test_invalid(self):
11 |         skip("teuthology.lock needs to be partially refactored to allow " +
12 |              "teuthology-updatekeys to return nonzero in all error cases")
13 |
14 | def test_all_and_targets(self):
15 | args = (self.script_name, '-a', '-t', 'foo')
16 | with raises(subprocess.CalledProcessError):
17 | subprocess.check_call(args)
18 |
19 | def test_no_args(self):
20 | with raises(subprocess.CalledProcessError):
21 | subprocess.check_call(self.script_name)
22 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-6.5-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - yum install -y yum-utils && yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/6/x86_64/ && yum install --nogpgcheck -y epel-release && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 && rm /etc/yum.repos.d/dl.fedoraproject.org*
4 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
5 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo
6 | preserve_hostname: true
7 | system_info:
8 | default_user:
9 | name: ubuntu
10 | packages:
11 | - dracut-modules-growroot
12 | runcmd:
13 | - mkinitrd --force /boot/initramfs-2.6.32-573.3.1.el6.x86_64.img 2.6.32-573.3.1.el6.x86_64
14 | - reboot
15 | final_message: "READYTORUN"
16 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/resources_hint.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | machine_type: openstack
3 | openstack:
4 | - machine:
5 | disk: 10 # GB
6 | ram: 10000 # MB
7 | cpus: 1
8 | volumes:
9 | count: 1
10 | size: 2 # GB
11 | os_type: ubuntu
12 | os_version: "14.04"
13 | roles:
14 | - - mon.a
15 | - osd.0
16 | tasks:
17 | - exec:
18 | mon.a:
19 | - test $(sed -n -e 's/MemTotal.* \([0-9][0-9]*\).*/\1/p' < /proc/meminfo) -ge 10000000 && echo "RAM" "size" "ok"
20 | - cat /proc/meminfo
21 | # wait for the attached volume to show up
22 | - for delay in 1 2 4 8 16 32 64 128 256 512 ; do if test -e /sys/block/vdb/size ; then break ; else sleep $delay ; fi ; done
23 |       # 2 GB = 4194304 sectors of 512 bytes; 4000000 leaves a safety margin
24 | - test $(cat /sys/block/vdb/size) -gt 4000000 && echo "Disk" "size" "ok"
25 | - cat /sys/block/vdb/size
26 |
--------------------------------------------------------------------------------
/scripts/wait.py:
--------------------------------------------------------------------------------
1 | import docopt
2 | import sys
3 |
4 | import logging
5 |
6 | import teuthology
7 | import teuthology.suite
8 | from teuthology.config import config
9 |
10 | doc = """
11 | usage: teuthology-wait --help
12 |        teuthology-wait [-v] --run <name>
13 |
14 | Wait until run is finished. Returns exit code 0 on success, otherwise 1.
15 |
16 | Miscellaneous arguments:
17 | -h, --help Show this help message and exit
18 | -v, --verbose Be more verbose
19 |
20 | Standard arguments:
21 |   -r <name>, --run <name>     Run name to watch.
22 | """
23 |
24 |
25 | def main(argv=sys.argv[1:]):
26 | args = docopt.docopt(doc, argv=argv)
27 | if args.get('--verbose'):
28 | teuthology.log.setLevel(logging.DEBUG)
29 | name = args.get('--run')
30 | return teuthology.suite.wait(name, config.max_job_time, None)
31 |
32 |
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 | ENV DEBIAN_FRONTEND=noninteractive
3 | RUN apt update && \
4 | apt -y install \
5 | sudo \
6 | openssh-server \
7 | hostname \
8 | curl \
9 | python3-pip \
10 | apache2 \
11 | nfs-kernel-server && \
12 |     apt clean
13 | COPY testnode_start.sh /
14 | COPY testnode_stop.sh /
15 | COPY testnode_sudoers /etc/sudoers.d/teuthology
16 | RUN \
17 | ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -N '' && \
18 | sed -i 's/#PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config && \
19 | mkdir -p /root/.ssh && \
20 | chmod 700 /root/.ssh && \
21 | useradd -g sudo ubuntu && \
22 | mkdir -p /home/ubuntu/.ssh && \
23 | chmod 700 /home/ubuntu/.ssh && \
24 | chown -R ubuntu /home/ubuntu
25 | EXPOSE 22
26 | ENTRYPOINT /testnode_start.sh
27 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/opensuse-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - zypper --non-interactive --no-gpg-checks rm gettext-runtime-mini grub2 grub2-branding-openSUSE grub2-i386-pc grub2-snapper-plugin grub2-systemd-sleep-plugin
14 | - zypper --non-interactive --no-gpg-checks install --no-recommends wget git-core rsyslog lsb-release make gcc gcc-c++ grub2 rpm-build
15 | - sleep 30
16 | final_message: "READYTORUN"
17 |
--------------------------------------------------------------------------------
/teuthology/test/test_imports.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import pytest
3 | import sys
4 |
5 | from pathlib import Path
6 | from typing import List
7 |
8 | root = Path("./teuthology")
9 |
10 |
11 | def find_modules() -> List[str]:
12 | modules = []
13 | for path in root.rglob("*.py"):
14 | if path.name.startswith("test_"):
15 | continue
16 | if "-" in path.name:
17 | continue
18 | if path.name == "__init__.py":
19 | path = path.parent
20 |
21 | path_name = str(path).replace("/", ".")
22 | if path_name.endswith(".py"):
23 | path_name = path_name[:-3]
24 | modules.append(path_name)
25 | return sorted(modules)
26 |
27 |
28 | @pytest.mark.parametrize("module", find_modules())
29 | def test_import_modules(module):
30 | importlib.import_module(module)
31 | assert module in sys.modules
32 |
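33 | # e.g. the path "teuthology/util/flock.py" yields the module name
34 | # "teuthology.util.flock", which the test then imports.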
--------------------------------------------------------------------------------
/teuthology/provision/cloud/test/test_openstack_userdata_conf.yaml:
--------------------------------------------------------------------------------
1 | libcloud:
2 | providers:
3 | my_provider:
4 | allow_networks:
5 | - sesci
6 | userdata:
7 | 'ubuntu-16.04':
8 | bootcmd:
9 | - 'SuSEfirewall2 stop || true'
10 | - 'service firewalld stop || true'
11 | runcmd:
12 | - 'uptime'
13 | - 'date'
14 | - 'zypper in -y lsb-release make gcc gcc-c++ chrony || true'
15 | - 'systemctl enable chronyd.service || true'
16 | - 'systemctl start chronyd.service || true'
17 | ssh_authorized_keys:
18 | - user_public_key1
19 | - user_public_key2
20 | driver: openstack
21 | driver_args:
22 | username: user
23 | password: password
24 | ex_force_auth_url: 'http://127.0.0.1:9999/v2.0/tokens'
25 |
--------------------------------------------------------------------------------
/teuthology/test/test_get_multi_machine_types.py:
--------------------------------------------------------------------------------
1 | from teuthology import misc as teuthology
2 |
3 | class Mock: pass
4 |
5 | class TestGetMultiMachineTypes(object):
6 |
7 | def test_space(self):
8 | give = 'burnupi plana vps'
9 | expect = ['burnupi','plana','vps']
10 | assert teuthology.get_multi_machine_types(give) == expect
11 |
12 | def test_tab(self):
13 |         give = 'burnupi\tplana\tvps'
14 | expect = ['burnupi','plana','vps']
15 | assert teuthology.get_multi_machine_types(give) == expect
16 |
17 | def test_comma(self):
18 | give = 'burnupi,plana,vps'
19 | expect = ['burnupi','plana','vps']
20 | assert teuthology.get_multi_machine_types(give) == expect
21 |
22 | def test_single(self):
23 | give = 'burnupi'
24 | expect = ['burnupi']
25 | assert teuthology.get_multi_machine_types(give) == expect
26 |
27 |
28 |
--------------------------------------------------------------------------------
/teuthology/test/test_parallel.py:
--------------------------------------------------------------------------------
1 | from teuthology.parallel import parallel
2 |
3 |
4 | def identity(item, input_set=None, remove=False):
5 | if input_set is not None:
6 | assert item in input_set
7 | if remove:
8 | input_set.remove(item)
9 | return item
10 |
11 |
12 | class TestParallel(object):
13 | def test_basic(self):
14 | in_set = set(range(10))
15 | with parallel() as para:
16 | for i in in_set:
17 | para.spawn(identity, i, in_set, remove=True)
18 | assert para.any_spawned is True
19 | assert para.count == len(in_set)
20 |
21 | def test_result(self):
22 | in_set = set(range(10))
23 | with parallel() as para:
24 | for i in in_set:
25 | para.spawn(identity, i, in_set)
26 | for result in para:
27 | in_set.remove(result)
28 |
29 |
--------------------------------------------------------------------------------
/docs/laptop/teuthology.yaml:
--------------------------------------------------------------------------------
1 | # replace $HOME with whatever appropriate to your needs
2 | # teuthology-lock
3 | lab_domain: local
4 | lock_server: http://localhost:80
5 | default_machine_type: vps
6 | # teuthology-run
7 | results_server: http://localhost:80
8 | # we do not need reserve_machines on localhost
9 | reserve_machines: 0
10 | # point to your teuthology
11 | teuthology_path: $HOME/teuthology
12 | # beanstalkd
13 | queue_host: localhost
14 | queue_port: 11300
15 | # if you want to make and test patches to ceph-cm-ansible
16 | # ceph_cm_ansible_git_url: $HOME/ceph-cm-ansible
17 | # customize kvm guests parameter
18 | downburst:
19 | path: $HOME/downburst/virtualenv/bin/downburst
20 | discover_url: http://localhost:8181/images/ibs/
21 | machine:
22 | cpus: 2
23 | disk: 12G
24 | ram: 2G
25 | volumes:
26 | size: 8G
27 | count: 4
28 | check_package_signatures: false
29 | suite_verify_ceph_hash: false
30 |
31 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | test:
11 | name: CI on python${{ matrix.python }} via ${{ matrix.os }}
12 | runs-on: ${{ matrix.os }}
13 | strategy:
14 | matrix:
15 | include:
16 | - os: ubuntu-22.04
17 | python: "3.10"
18 | - os: ubuntu-22.04
19 | python: "3.11"
20 | - os: ubuntu-24.04
21 | python: "3.12"
22 | steps:
23 | - uses: actions/checkout@v4
24 | - name: Setup Python
25 | uses: actions/setup-python@v5
26 | with:
27 | python-version: ${{ matrix.python }}
28 | - name: Install tox
29 | run: pip install tox
30 | - name: Run flake8
31 | run: tox -e flake8
32 | - name: Run unit tests
33 | run: tox -e py3
34 | - name: Run docs build
35 | run: tox -e docs
36 |
--------------------------------------------------------------------------------
/scripts/updatekeys.py:
--------------------------------------------------------------------------------
1 | import docopt
2 | import sys
3 |
4 | import teuthology.lock
5 | import teuthology.lock.cli
6 |
7 | doc = """
8 | usage: teuthology-updatekeys -h
 9 |        teuthology-updatekeys [-v] -t <targets>
10 |        teuthology-updatekeys [-v] <machines>...
11 | teuthology-updatekeys [-v] -a
12 |
13 | Update any hostkeys that have changed. You can list specific machines to run
14 | on, or use -a to check all of them automatically.
15 |
16 | positional arguments:
17 | MACHINES hosts to check for updated keys
18 |
19 | optional arguments:
20 | -h, --help Show this help message and exit
21 | -v, --verbose Be more verbose
22 |   -t <targets>, --targets <targets>
23 | Input yaml containing targets to check
24 | -a, --all Update hostkeys of all machines in the db
25 | """
26 |
27 |
28 | def main():
29 | args = docopt.docopt(doc)
30 | status = teuthology.lock.cli.updatekeys(args)
31 | sys.exit(status)
32 |
--------------------------------------------------------------------------------
/teuthology/openstack/bootstrap-teuthology.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -ex
2 | TEUTH_PATH=${1:-"teuthology"}
3 | TEUTH_GIT=${2:-"https://github.com/ceph/teuthology"}
4 | TEUTH_BRANCH=${3:-"main"}
5 |
6 | mkdir -p $TEUTH_PATH
7 | git init $TEUTH_PATH
8 |
9 | pushd $TEUTH_PATH
10 |
11 | echo "Fetch upstream changes from $TEUTH_GIT"
12 | git fetch --tags --progress $TEUTH_GIT +refs/heads/*:refs/remotes/origin/*
13 | git config remote.origin.url $TEUTH_GIT
14 | git config --add remote.origin.fetch +refs/heads/*:refs/remotes/origin/*
15 | git config remote.origin.url $TEUTH_GIT
16 |
17 | # Check if branch has form origin/pr/*/merge
18 | isPR="^origin\/pr\/"
19 | if [[ "$TEUTH_BRANCH" =~ $isPR ]] ; then
20 |
21 | git fetch --tags --progress https://github.com/suse/teuthology +refs/pull/*:refs/remotes/origin/pr/*
22 | rev=$(git rev-parse refs/remotes/$TEUTH_BRANCH^{commit})
23 |
24 | git config core.sparsecheckout
25 | git checkout -f $rev
26 | else
27 | git checkout $TEUTH_BRANCH
28 | fi
29 |
30 | ./bootstrap install
31 |
32 | popd
33 |
34 |
--------------------------------------------------------------------------------
/scripts/reimage.py:
--------------------------------------------------------------------------------
1 | import docopt
2 | import sys
3 |
4 | import teuthology.reimage
5 |
6 | doc = """
7 | usage: teuthology-reimage --help
 8 |        teuthology-reimage --os-type distro --os-version version [options] <machines>...
9 |
10 | Reimage nodes (without locking them) using the specified distro type and
11 | version. The nodes must already be locked by the current user, otherwise an
12 | error occurs. A custom owner can be specified in order to reimage nodes
13 | locked by someone else. Reimaging unlocked nodes is not supported.
14 |
15 | Standard arguments:
16 | -h, --help Show this help message and exit
17 | -v, --verbose Be more verbose
18 | --os-type Distro type like: rhel, ubuntu, etc.
19 | --os-version Distro version like: 7.6, 16.04, etc.
20 | --owner user@host Owner of the locked machines
21 | """
22 |
23 | def main(argv=sys.argv[1:]):
24 | args = docopt.docopt(doc, argv=argv)
25 | return teuthology.reimage.main(args)
26 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-ubuntu-14.04-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( wget -qO - http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(wget -qO - http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | manage_etc_hosts: true
12 | preserve_hostname: true
13 | system_info:
14 | default_user:
15 | name: {username}
16 | packages:
17 | - python
18 | - wget
19 | - git
20 | - ntp
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-ubuntu-16.04-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( wget -qO - http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(wget -qO - http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | manage_etc_hosts: true
12 | preserve_hostname: true
13 | system_info:
14 | default_user:
15 | name: {username}
16 | packages:
17 | - python
18 | - wget
19 | - git
20 | - ntp
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/task/tests/test_locking.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
4 | class TestLocking(object):
5 |
6 | def test_correct_os_type(self, ctx, config):
7 | os_type = ctx.config.get("os_type")
8 | if os_type is None:
9 | pytest.skip('os_type was not defined')
10 | for remote in ctx.cluster.remotes.keys():
11 | assert remote.os.name == os_type
12 |
13 | def test_correct_os_version(self, ctx, config):
14 | os_version = ctx.config.get("os_version")
15 | if os_version is None:
16 | pytest.skip('os_version was not defined')
17 | if ctx.config.get("os_type") == "debian":
18 | pytest.skip('known issue with debian versions; see: issue #10878')
19 | for remote in ctx.cluster.remotes.keys():
20 | assert remote.inventory_info['os_version'] == os_version
21 |
22 | def test_correct_machine_type(self, ctx, config):
23 | machine_type = ctx.machine_type
24 | for remote in ctx.cluster.remotes.keys():
25 | assert remote.machine_type in machine_type
26 |
--------------------------------------------------------------------------------
/scripts/results.py:
--------------------------------------------------------------------------------
1 | """
2 | usage: teuthology-results [-h] [-v] [--dry-run] [--email EMAIL] [--timeout TIMEOUT] --archive-dir DIR --name NAME [--subset SUBSET] [--seed SEED] [--no-nested-subset]
3 |
4 | Email teuthology suite results
5 |
6 | optional arguments:
7 | -h, --help show this help message and exit
8 | -v, --verbose be more verbose
9 | --dry-run Instead of sending the email, just print it
10 | --email EMAIL address to email test failures to
11 | --timeout TIMEOUT how many seconds to wait for all tests to finish
12 | [default: 0]
13 | --archive-dir DIR path under which results for the suite are stored
14 | --name NAME name of the suite
15 | --subset SUBSET subset passed to teuthology-suite
16 | --seed SEED random seed used in teuthology-suite
17 | --no-nested-subset disable nested subsets used in teuthology-suite
18 | """
19 | import docopt
20 | import teuthology.results
21 |
22 |
23 | def main():
24 | args = docopt.docopt(__doc__)
25 | teuthology.results.main(args)
26 |
--------------------------------------------------------------------------------
/teuthology/util/test/files/test_valgrind.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0"?>
 2 | <valgrindoutput>
 3 | <error>
 4 |   <unique>0x870fc</unique>
 5 |   <tid>1</tid>
 6 |   <kind>Leak_DefinitelyLost</kind>
 7 |   <xwhat>
 8 |     <text>1,234 bytes in 1 blocks are definitely lost in loss record 198 of 201</text>
 9 |     <leakedbytes>1234</leakedbytes>
10 |     <leakedblocks>1</leakedblocks>
11 |   </xwhat>
12 |   <stack>
13 |     <frame>
14 |       <ip>0x4C39B6F</ip>
15 |       <obj>/usr/libexec/valgrind/vgpreload_memcheck-amd64-linux.so</obj>
16 |       <fn>operator new[](unsigned long)</fn>
17 |       <dir>/builddir/build/BUILD/valgrind-3.19.0/coregrind/m_replacemalloc</dir>
18 |       <file>vg_replace_malloc.c</file>
19 |       <line>640</line>
20 |     </frame>
21 |     <frame>
22 |       <ip>0xF3F4B5</ip>
23 |       <obj>/usr/bin/ceph-osd</obj>
24 |       <fn>ceph::common::leak_some_memory()</fn>
25 |       <dir>/usr/src/debug/ceph-18.0.0-5567.g64a4fc94.el8.x86_64/src/common</dir>
26 |       <file>ceph_context.cc</file>
27 |       <line>510</line>
28 |     </frame>
29 |   </stack>
30 | </error>
31 | </valgrindoutput>
32 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - redhat-lsb-core
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - redhat-lsb-core
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - redhat-lsb-core
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/docs/_static/worker_start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # A simple script used by Red Hat to start teuthology-worker processes.
4 |
5 | ARCHIVE=${ARCHIVE:-"$HOME/archive"}
6 | WORKER_LOGS=$ARCHIVE/worker_logs
7 |
8 | function start_workers_for_tube {
9 | echo "Starting $2 workers for $1"
10 |     for i in $(seq 1 "$2")
11 | do
12 | teuthology-worker -v --archive-dir $ARCHIVE --tube $1 --log-dir $WORKER_LOGS &
13 | done
14 | }
15 |
16 | function start_all {
17 | start_workers_for_tube plana 50
18 | start_workers_for_tube mira 50
19 | start_workers_for_tube vps 80
20 | start_workers_for_tube burnupi 10
21 | start_workers_for_tube tala 5
22 | start_workers_for_tube saya 10
23 | start_workers_for_tube multi 100
24 | }
25 |
26 | function main {
27 | printf '%s\n' "$*"
28 | if [[ -z "$*" ]]
29 | then
30 | start_all
31 | elif [ ! -z "$2" ] && [ "$2" -gt "0" ]
32 | then
33 |         start_workers_for_tube "$1" "$2"
34 | else
35 | echo "usage: $0 [tube_name number_of_workers]" >&2
36 | exit 1
37 | fi
38 | }
39 |
40 | main "$@"
41 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014 Red Hat, Inc.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-ubuntu-12.04-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | preserve_hostname: true
12 | manage_etc_hosts: true
13 | system_info:
14 | default_user:
15 | name: {username}
16 | packages:
17 | - python
18 | - wget
19 | - git
20 | - ntp
21 | runcmd:
22 | - dpkg -l python wget git ntp >> /var/log/cloud-init-output.log
23 | - echo "{up}" >> /var/log/cloud-init-output.log
24 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - touch /tmp/init.out
4 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
5 | manage_etc_hosts: true
6 | system_info:
7 | default_user:
8 | name: TEUTHOLOGY_USERNAME
9 | packages:
10 | - python-virtualenv
11 | - git
12 | - rsync
13 | runcmd:
14 | - su - -c '(set -x ; CLONE_OPENSTACK && cd teuthology && ./bootstrap install)' TEUTHOLOGY_USERNAME >> /tmp/init.out 2>&1
15 | - echo 'export OPENRC' | tee /home/TEUTHOLOGY_USERNAME/openrc.sh
16 | - su - -c '(set -x ; source openrc.sh ; cd teuthology ; source virtualenv/bin/activate ; teuthology/openstack/setup-openstack.sh --nworkers NWORKERS UPLOAD CEPH_WORKBENCH CANONICAL_TAGS SETUP_OPTIONS)' TEUTHOLOGY_USERNAME >> /tmp/init.out 2>&1
17 | # workaround: stop the paddles and pulpito instances started by setup-openstack before starting the teuthology service
18 | - pkill -f 'pecan serve'
19 | - pkill -f 'python run.py'
20 | - systemctl enable teuthology
21 | - systemctl start teuthology
22 | final_message: "teuthology is up and running after $UPTIME seconds"
23 |
--------------------------------------------------------------------------------
/teuthology/job_status.py:
--------------------------------------------------------------------------------
1 | def get_status(summary):
2 | """
3 | :param summary: The job summary dict. Normally ctx.summary
4 | :returns: A status string like 'pass', 'fail', or 'dead'
5 | """
6 | status = summary.get('status')
7 | if status is not None:
8 | return status
9 |
10 | success = summary.get('success')
11 | if success is True:
12 | status = 'pass'
13 | elif success is False:
14 | status = 'fail'
15 | else:
16 | status = None
17 | return status
18 |
19 |
20 | def set_status(summary, status):
21 | """
22 | Sets summary['status'] to status, and summary['success'] to True if status
23 | is 'pass'. If status is not 'pass', then 'success' is False.
24 |
25 | If status is None, do nothing.
26 |
27 | :param summary: The job summary dict. Normally ctx.summary
28 | :param status: The job status, e.g. 'pass', 'fail', 'dead'
29 | """
30 | if status is None:
31 | return
32 |
33 | summary['status'] = status
34 | if status == 'pass':
35 | summary['success'] = True
36 | else:
37 | summary['success'] = False
38 |
39 |
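40 | # Informal example:
41 | #   s = {}
42 | #   set_status(s, 'pass')  # s == {'status': 'pass', 'success': True}
43 | #   get_status(s)          # -> 'pass'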
--------------------------------------------------------------------------------
/teuthology/task/internal/lock_machines.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import logging
3 |
4 | import teuthology.lock.ops
5 | import teuthology.lock.query
6 | import teuthology.lock.util
7 |
8 | log = logging.getLogger(__name__)
9 |
10 |
11 | @contextlib.contextmanager
12 | def lock_machines(ctx, config):
13 | """
14 | Lock machines. Called when the teuthology run finds and locks
15 |     new machines. This is not called if the user has already teuthology-locked
16 |     machines and placed those keys in the targets section of a yaml file.
17 | """
18 | assert isinstance(config[0], int), 'config[0] must be an integer'
19 | machine_type = config[1]
20 | total_requested = config[0]
21 | # We want to make sure there are always this many machines available
22 | teuthology.lock.ops.block_and_lock_machines(ctx, total_requested, machine_type)
23 | try:
24 | yield
25 | finally:
26 | if ctx.config.get("unlock_on_failure", True):
27 | log.info('Unlocking machines...')
28 | for machine in ctx.config['targets'].keys():
29 | teuthology.lock.ops.unlock_one(machine, ctx.owner, ctx.archive)
30 |
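31 | # Config shape, per the assertions above (machine type name is illustrative):
32 | #   [3, 'mytype'] locks three machines of type 'mytype'.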
--------------------------------------------------------------------------------
/teuthology/task/interactive.py:
--------------------------------------------------------------------------------
1 | """
2 | Drop into a python shell
3 | """
4 | import code
5 | import readline
6 | import rlcompleter
7 | rlcompleter.__name__ # silence pyflakes
8 | import pprint
9 |
10 | readline.parse_and_bind('tab: complete')
11 |
12 | def task(ctx, config):
13 | """
14 | Run an interactive Python shell, with the cluster accessible via
15 | the ``ctx`` variable.
16 |
17 | Hit ``control-D`` to continue.
18 |
19 | This is also useful to pause the execution of the test between two
20 | tasks, either to perform ad hoc operations, or to examine the
21 | state of the cluster. You can also use it to easily bring up a
22 | Ceph cluster for ad hoc testing.
23 |
24 | For example::
25 |
26 | tasks:
27 | - ceph:
28 | - interactive:
29 | """
30 |
31 | pp = pprint.PrettyPrinter().pprint
32 | code.interact(
33 | banner='Ceph test interactive mode, use ctx to interact with the cluster, press control-D to exit...',
34 | # TODO simplify this
35 | local=dict(
36 | ctx=ctx,
37 | config=config,
38 | pp=pp,
39 | ),
40 | )
41 |
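42 | # Inside the shell, for example (informal):
43 | #   >>> pp(ctx.config)
44 | #   >>> ctx.cluster.run(args=['uname', '-a'])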
--------------------------------------------------------------------------------
/scripts/supervisor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 |
4 | import teuthology.dispatcher.supervisor
5 |
6 |
7 | def parse_args(argv):
8 | parser = argparse.ArgumentParser(
9 | description="Supervise and run a teuthology job; normally only run by the dispatcher",
10 | )
11 | parser.add_argument(
12 | "-v",
13 | "--verbose",
14 | action="store_true",
15 | help="be more verbose",
16 | )
17 | parser.add_argument(
18 | "-a",
19 | "--archive-dir",
20 | type=str,
21 | help="path in which to store the job's logfiles",
22 | required=True,
23 | )
24 | parser.add_argument(
25 | "--bin-path",
26 | type=str,
27 | help="teuthology bin path",
28 | required=True,
29 | )
30 | parser.add_argument(
31 | "--job-config",
32 | type=str,
33 | help="file descriptor of job's config file",
34 | required=True,
35 | )
36 | return parser.parse_args(argv)
37 |
38 |
39 | def main():
40 | sys.exit(teuthology.dispatcher.supervisor.main(parse_args(sys.argv[1:])))
41 |
42 |
43 | if __name__ == "__main__":
44 | main()
45 |
--------------------------------------------------------------------------------
/teuthology/task/full_sequential.py:
--------------------------------------------------------------------------------
1 | """
2 | Task sequencer - full
3 | """
4 | import sys
5 | import logging
6 |
7 | from teuthology import run_tasks
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def task(ctx, config):
13 | """
14 | Run a set of tasks to completion in order. __exit__ is called on a task
15 | before __enter__ on the next
16 |
17 | example::
18 | - full_sequential:
19 | - tasktest:
20 | - tasktest:
21 |
22 | :param ctx: Context
23 | :param config: Configuration
24 | """
25 | for entry in config:
26 | if not isinstance(entry, dict):
27 | entry = ctx.config.get(entry, {})
28 | ((taskname, confg),) = entry.items()
29 | log.info('In full_sequential, running task %s...' % taskname)
30 | mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
31 | if hasattr(mgr, '__enter__'):
32 | try:
33 | mgr.__enter__()
34 | finally:
35 | try:
36 | exc_info = sys.exc_info()
37 | mgr.__exit__(*exc_info)
38 | finally:
39 | del exc_info
40 |
--------------------------------------------------------------------------------
/teuthology/task/tests/test_run.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import pytest
3 |
4 | from io import StringIO
5 |
6 | from teuthology.exceptions import CommandFailedError
7 |
8 | log = logging.getLogger(__name__)
9 |
10 |
11 | class TestRun(object):
12 | """
13 | Tests to see if we can make remote procedure calls to the current cluster
14 | """
15 |
16 | def test_command_failed_label(self, ctx, config):
17 | result = ""
18 | try:
19 | ctx.cluster.run(
20 | args=["python3", "-c", "assert False"],
21 | label="working as expected, nothing to see here"
22 | )
23 | except CommandFailedError as e:
24 | result = str(e)
25 |
26 | assert "working as expected" in result
27 |
28 | def test_command_failed_no_label(self, ctx, config):
29 | with pytest.raises(CommandFailedError):
30 | ctx.cluster.run(
31 | args=["python3", "-c", "assert False"],
32 | )
33 |
34 | def test_command_success(self, ctx, config):
35 | result = StringIO()
36 | ctx.cluster.run(
37 | args=["python3", "-c", "print('hi')"],
38 | stdout=result
39 | )
40 | assert result.getvalue().strip() == "hi"
41 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/test_config.py:
--------------------------------------------------------------------------------
1 | from teuthology.config import config
2 |
3 |
4 | class TestOpenStack(object):
5 |
6 | def setup_method(self):
7 | self.openstack_config = config['openstack']
8 |
9 | def test_config_clone(self):
10 | assert 'clone' in self.openstack_config
11 |
12 | def test_config_user_data(self):
13 | os_type = 'rhel'
14 | os_version = '7.0'
15 | template_path = self.openstack_config['user-data'].format(
16 | os_type=os_type,
17 | os_version=os_version)
18 | assert os_type in template_path
19 | assert os_version in template_path
20 |
21 | def test_config_ip(self):
22 | assert 'ip' in self.openstack_config
23 |
24 | def test_config_machine(self):
25 | assert 'machine' in self.openstack_config
26 | machine_config = self.openstack_config['machine']
27 | assert 'disk' in machine_config
28 | assert 'ram' in machine_config
29 | assert 'cpus' in machine_config
30 |
31 | def test_config_volumes(self):
32 | assert 'volumes' in self.openstack_config
33 | volumes_config = self.openstack_config['volumes']
34 | assert 'count' in volumes_config
35 | assert 'size' in volumes_config
36 |
--------------------------------------------------------------------------------
/teuthology/test/task/test_selinux.py:
--------------------------------------------------------------------------------
1 | from mock import patch, Mock, DEFAULT
2 |
3 | from teuthology.config import FakeNamespace
4 | from teuthology.orchestra.cluster import Cluster
5 | from teuthology.orchestra.remote import Remote
6 | from teuthology.task.selinux import SELinux
7 |
8 |
9 | class TestSELinux(object):
10 | def setup_method(self):
11 | self.ctx = FakeNamespace()
12 | self.ctx.config = dict()
13 |
14 | def test_host_exclusion(self):
15 | with patch.multiple(
16 | Remote,
17 | os=DEFAULT,
18 | run=DEFAULT,
19 | ):
20 | self.ctx.cluster = Cluster()
21 | remote1 = Remote('remote1')
22 | remote1.os = Mock()
23 | remote1.os.package_type = 'rpm'
24 | remote1._is_vm = False
25 | self.ctx.cluster.add(remote1, ['role1'])
26 |             remote2 = Remote('remote2')
27 | remote2.os = Mock()
28 | remote2.os.package_type = 'deb'
29 | remote2._is_vm = False
30 | self.ctx.cluster.add(remote2, ['role2'])
31 | task_config = dict()
32 | with SELinux(self.ctx, task_config) as task:
33 | remotes = list(task.cluster.remotes)
34 | assert remotes == [remote1]
35 |
36 |
--------------------------------------------------------------------------------
/scripts/queue.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.config
4 | import teuthology.beanstalk
5 |
6 | doc = """
7 | usage: teuthology-queue -h
8 | teuthology-queue [-s|-d|-f] -m MACHINE_TYPE
9 | teuthology-queue [-r] -m MACHINE_TYPE
10 | teuthology-queue -m MACHINE_TYPE -D PATTERN
11 | teuthology-queue -p SECONDS [-m MACHINE_TYPE]
12 |
13 | List Jobs in queue.
14 | If -D is passed, then jobs with PATTERN in the job name are deleted from the
15 | queue.
16 |
17 | Arguments:
18 | -m, --machine_type MACHINE_TYPE [default: multi]
19 | Which machine type queue to work on.
20 |
21 | optional arguments:
22 | -h, --help Show this help message and exit
23 | -D, --delete PATTERN Delete Jobs with PATTERN in their name
24 | -d, --description Show job descriptions
25 | -r, --runs Only show run names
26 | -f, --full Print the entire job config. Use with caution.
27 | -s, --status Prints the status of the queue
28 | -p, --pause SECONDS Pause queues for a number of seconds. A value of 0
29 | will unpause. If -m is passed, pause that queue,
30 | otherwise pause all queues.
31 | """
32 |
33 |
34 | def main():
35 | args = docopt.docopt(doc)
36 | teuthology.beanstalk.main(args)
37 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-debian-8.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | runcmd:
21 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
22 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
23 | - echo '{username} ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
24 | final_message: "{up}, after $UPTIME seconds"
25 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-6.5-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - yum install -y yum-utils && yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/6/x86_64/ && yum install --nogpgcheck -y epel-release && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 && rm /etc/yum.repos.d/dl.fedoraproject.org*
9 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - dracut-modules-growroot
21 | runcmd:
22 | - mkinitrd --force /boot/initramfs-2.6.32-573.3.1.el6.x86_64.img 2.6.32-573.3.1.el6.x86_64
23 | - reboot
24 | final_message: "{up}, after $UPTIME seconds"
25 |
--------------------------------------------------------------------------------
/teuthology/task/internal/check_lock.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import teuthology.lock.query
4 | import teuthology.lock.util
5 |
6 | from teuthology.config import config as teuth_config
7 |
8 | log = logging.getLogger(__name__)
9 |
10 |
11 | def check_lock(ctx, config, check_up=True):
12 | """
13 | Check lock status of remote machines.
14 | """
15 | if not teuth_config.lock_server or ctx.config.get('check-locks') is False:
16 | log.info('Lock checking disabled.')
17 | return
18 | log.info('Checking locks...')
19 | for machine in ctx.config['targets'].keys():
20 | status = teuthology.lock.query.get_status(machine)
21 | log.debug('machine status is %s', repr(status))
22 | assert status is not None, \
23 | 'could not read lock status for {name}'.format(name=machine)
24 | if check_up:
25 | assert status['up'], 'machine {name} is marked down'.format(
26 | name=machine
27 | )
28 | assert status['locked'], \
29 | 'machine {name} is not locked'.format(name=machine)
30 | assert status['locked_by'] == ctx.owner, \
31 | 'machine {name} is locked by {user}, not {owner}'.format(
32 | name=machine,
33 | user=status['locked_by'],
34 | owner=ctx.owner,
35 | )
36 |
--------------------------------------------------------------------------------
/teuthology/task/loop.py:
--------------------------------------------------------------------------------
1 | """
2 | Task to loop a list of items
3 | """
4 | import sys
5 | import logging
6 |
7 | from teuthology import run_tasks
8 |
9 | log = logging.getLogger(__name__)
10 |
11 | def task(ctx, config):
12 | """
13 | Loop a sequential group of tasks
14 |
15 | example::
16 |
17 | - loop:
18 | count: 10
19 | body:
20 | - tasktest:
21 | - tasktest:
22 |
23 | :param ctx: Context
24 | :param config: Configuration
25 | """
26 | for i in range(config.get('count', 1)):
27 | stack = []
28 | try:
29 | for entry in config.get('body', []):
30 | if not isinstance(entry, dict):
31 | entry = ctx.config.get(entry, {})
32 | ((taskname, confg),) = entry.items()
33 |                 log.info('In loop, running task %s...' % taskname)
34 | mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
35 | if hasattr(mgr, '__enter__'):
36 | mgr.__enter__()
37 | stack.append(mgr)
38 | finally:
39 | try:
40 | exc_info = sys.exc_info()
41 | while stack:
42 | mgr = stack.pop()
43 | mgr.__exit__(*exc_info)
44 | finally:
45 | del exc_info
46 |
--------------------------------------------------------------------------------
/teuthology/provision/test/test_pelagos.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | from pytest import raises
3 | from teuthology.config import config
4 | from teuthology.provision import pelagos
5 |
6 | import teuthology.provision
7 |
8 |
9 | test_config = dict(
10 | pelagos=dict(
11 | endpoint='http://pelagos.example:5000/',
12 | machine_types='ptype1,ptype2',
13 | ),
14 | )
15 |
16 | class TestPelagos(object):
17 |
18 | def setup_method(self):
19 | config.load(deepcopy(test_config))
20 |
21 | def teardown_method(self):
22 | pass
23 |
24 | def test_get_types(self):
25 | #klass = pelagos.Pelagos
26 | types = pelagos.get_types()
27 | assert types == ["ptype1", "ptype2"]
28 |
29 | def test_disabled(self):
30 | config.pelagos['endpoint'] = None
31 | enabled = pelagos.enabled()
32 | assert enabled == False
33 |
34 | def test_pelagos(self):
35 | class context:
36 | pass
37 |
38 | ctx = context()
39 |         ctx.os_type = 'sle'
40 | ctx.os_version = '15.1'
41 | with raises(Exception) as e_info:
42 | teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'ptype1')
43 | e_str = str(e_info)
44 | print("Caught exception: " + e_str)
45 | assert e_str.find(r"Name\sor\sservice\snot\sknown") == -1
46 |
47 |
--------------------------------------------------------------------------------
/teuthology/safepath.py:
--------------------------------------------------------------------------------
1 | import errno
2 | import os
3 |
4 | def munge(path):
5 | """
6 | Munge a potentially hostile path name to be safe to use.
7 |
8 | This very definitely changes the meaning of the path,
9 | but it only does that for unsafe paths.
10 | """
11 | # explicitly ignoring windows as a platform
12 | segments = path.split('/')
13 | # filter out empty segments like foo//bar
14 | segments = [s for s in segments if s!='']
15 | # filter out no-op segments like foo/./bar
16 | segments = [s for s in segments if s!='.']
17 | # all leading dots become underscores; makes .. safe too
18 | for idx, seg in enumerate(segments):
19 | if seg.startswith('.'):
20 | segments[idx] = '_'+seg[1:]
21 | # empty string, "/", "//", etc
22 | if not segments:
23 | segments = ['_']
24 | return '/'.join(segments)
25 |
26 |
27 | def makedirs(root, path):
28 | """
29 | os.makedirs gets confused if the path contains '..', and root might.
30 |
31 | This relies on the fact that `path` has been normalized by munge().
32 | """
33 | segments = path.split('/')
34 | for seg in segments:
35 | root = os.path.join(root, seg)
36 | try:
37 | os.mkdir(root)
38 | except OSError as e:
39 | if e.errno == errno.EEXIST:
40 | pass
41 | else:
42 | raise
43 |
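A few concrete munge() behaviors, taken from the unit tests elsewhere in this tree (see test_safepath.py):

    from teuthology import safepath

    # Leading dots become underscores, neutralizing traversal:
    assert safepath.munge('../evil/foo') == '_./evil/foo'
    # Absolute paths are made relative:
    assert safepath.munge('/evil/here') == 'evil/here'
    # Degenerate inputs collapse to a single underscore:
    assert safepath.munge('//') == '_'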
--------------------------------------------------------------------------------
/containers/teuthology-dev/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:noble
2 | ENV DEBIAN_FRONTEND=noninteractive
3 | ENV LANG=C.UTF-8
4 | RUN apt-get update && \
5 | apt-get install -y \
6 | git \
7 | qemu-utils \
8 | python3-dev \
9 | libssl-dev \
10 | ipmitool \
11 | python3-pip \
12 | python3-venv \
13 | vim \
14 | jq \
15 | curl \
16 | libev-dev \
17 | libvirt-dev \
18 | libffi-dev \
19 | libyaml-dev \
20 | locales \
21 | lsb-release && \
22 | apt-get clean all && \
23 | locale-gen $LC_ALL
24 | WORKDIR /teuthology
25 | COPY requirements.txt requirements.yml ansible.cfg bootstrap /teuthology/
26 | RUN \
27 | cd /teuthology && \
28 | mkdir ../archive_dir && \
29 | mkdir log && \
30 | chmod +x /teuthology/bootstrap && \
31 | PIP_INSTALL_FLAGS="-r requirements.txt" ./bootstrap
32 | COPY . /teuthology
33 | RUN \
34 | (git config -f ./.git/config --unset 'http.https://github.com/.extraheader' || true ) && \
35 | ./bootstrap
36 | COPY containers/teuthology-dev/containerized_node.yaml /teuthology
37 | COPY containers/teuthology-dev/.teuthology.yaml /root
38 | COPY containers/teuthology-dev/teuthology.sh /
39 | RUN \
40 | mkdir $HOME/.ssh && \
41 | touch $HOME/.ssh/id_rsa && \
42 | chmod 600 $HOME/.ssh/id_rsa && \
43 | echo "StrictHostKeyChecking=no" > $HOME/.ssh/config && \
44 | echo "UserKnownHostsFile=/dev/null" >> $HOME/.ssh/config
45 | ENTRYPOINT /teuthology.sh
46 |
--------------------------------------------------------------------------------
/scripts/update_inventory.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology
4 | import teuthology.lock
5 | import teuthology.lock.ops
6 | import teuthology.misc
7 | import teuthology.orchestra.remote
8 |
9 | import logging
10 |
11 | doc = """
12 | usage: teuthology-update-inventory -h
13 | teuthology-update-inventory [-v] [-m type] REMOTE [REMOTE ...]
14 |
15 | Update the given nodes' inventory information on the lock server
16 |
17 | optional arguments:
18 | -h, --help show this help message and exit
19 | -v, --verbose be more verbose
20 |   -m <type>, --machine-type <type>  optionally specify a machine type when
21 | submitting nodes for the first time
22 | REMOTE hostnames of machines whose information to update
23 |
24 | """
25 |
26 |
27 | def main():
28 | args = docopt.docopt(doc)
29 | if args['--verbose']:
30 | teuthology.log.setLevel(logging.DEBUG)
31 |
32 | machine_type = args.get('--machine-type')
33 | remotes = args.get('REMOTE')
34 | for rem_name in remotes:
35 | rem_name = teuthology.misc.canonicalize_hostname(rem_name)
36 | remote = teuthology.orchestra.remote.Remote(rem_name)
37 | remote.connect()
38 | inventory_info = remote.inventory_info
39 | if machine_type:
40 | inventory_info['machine_type'] = machine_type
41 | teuthology.lock.ops.update_inventory(inventory_info)
42 |
--------------------------------------------------------------------------------
/.github/workflows/dev_container.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: dev_container
3 | on:
4 | push:
5 | branches:
6 | - "main"
7 | pull_request:
8 | branches:
9 | - main
10 | workflow_dispatch:
11 |
12 | jobs:
13 | docker:
14 | runs-on: ${{ matrix.os }}
15 | strategy:
16 | matrix:
17 | include:
18 | - os: ubuntu-24.04
19 | python: "3.12"
20 | - os: ubuntu-24.04-arm
21 | python: "3.12"
22 | steps:
23 | - name: Checkout
24 | uses: actions/checkout@v4
25 | - name: Login to Quay.io
26 | if: github.event_name == 'push' && github.ref_name == 'main'
27 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
28 | with:
29 | registry: quay.io
30 | username: ${{ secrets.QUAY_USERNAME }}
31 | password: ${{ secrets.QUAY_ROBOT_TOKEN }}
32 | - name: Build and push
33 | uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
34 | env:
35 | QUAY_URI: quay.io/ceph-infra/teuthology-dev
36 | QUAY_TAG: ${{ github.event_name == 'pull_request' && github.head_ref || github.ref_name }}
37 | with:
38 | context: .
39 | file: containers/teuthology-dev/Dockerfile
40 | push: ${{ github.event_name == 'push' && github.ref_name == 'main' }}
41 | tags: ${{ env.QUAY_URI }}:${{ env.QUAY_TAG }}
42 | outputs: type=image,name=target
43 |
--------------------------------------------------------------------------------
/.github/workflows/integration.yml:
--------------------------------------------------------------------------------
1 | name: integration
2 | on:
3 | pull_request:
4 | workflow_dispatch:
5 | jobs:
6 | test:
7 | runs-on: ubuntu-24.04
8 | steps:
9 | - uses: actions/checkout@v4
10 | - name: Make archive directory
11 | run: mkdir /tmp/archive_dir
12 | - name: Test using docker-compose
13 | run: ./start.sh
14 | working-directory: ./docs/docker-compose
15 | - name: Rename Directory
16 |         # Replace ":" with "_" everywhere in each directory path.
17 |         # This is needed because GitHub Actions does not support the ":" colon character in artifact paths (like in /root-2025-03-06_18:47:26-teuthology:no-ceph-main-distro-default-testnode).
18 | # Invalid characters include: Double quote ", Colon :, Less than <, Greater than >, Vertical bar |, Asterisk *, Question mark ?, Carriage return \r, Line feed \n
19 | if: always()
20 | run: |
21 | for DIR in /tmp/archive_dir/root-*; do
22 | SAFE_DIR="${DIR//:/_}" # Replace in '/tmp/archive_dir/root-2025-03-06_18:47:26-teuthology:no-ceph-main-distro-default-testnode'
23 | if [ "$DIR" != "$SAFE_DIR" ]; then
24 | mv "$DIR" "$SAFE_DIR"
25 | fi
26 | done
27 | - name: Upload teuthology archive logs
28 | uses: actions/upload-artifact@v4
29 | if: always()
30 | with:
31 | name: teuthology-logs
32 | path: |
33 | /tmp/archive_dir/*
34 |
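The rename step's parameter expansion, expressed in Python for clarity (the path is the example from the comment above):

    d = '/tmp/archive_dir/root-2025-03-06_18:47:26-teuthology:no-ceph-main-distro-default-testnode'
    safe = d.replace(':', '_')  # what ${DIR//:/_} does in bash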
--------------------------------------------------------------------------------
/teuthology/task/timer.py:
--------------------------------------------------------------------------------
1 | """
2 | Timer task
3 | """
4 | import logging
5 | import contextlib
6 | import datetime
7 |
8 | log = logging.getLogger(__name__)
9 |
10 | @contextlib.contextmanager
11 | def task(ctx, config):
12 | """
13 | Timer
14 |
15 | Measure the time that this set of tasks takes and save that value in the summary file.
16 | Config is a description of what we are timing.
17 |
18 | example::
19 |
20 | tasks:
21 | - ceph:
22 | - foo:
23 | - timer: "fsx run"
24 | - fsx:
25 |
26 | """
27 | start = datetime.datetime.now()
28 | log.debug("got here in timer")
29 | try:
30 | yield
31 | finally:
32 | nowinfo = datetime.datetime.now()
33 | elapsed = nowinfo - start
34 | datesaved = nowinfo.isoformat(' ')
35 | hourz, remainder = divmod(elapsed.seconds, 3600)
36 | minutez, secondz = divmod(remainder, 60)
37 |         elapsedtime = "%02d:%02d:%02d.%06d" % (hourz, minutez, secondz, elapsed.microseconds)
38 |         dateinfo = (datesaved, elapsedtime)
39 |         if 'timer' not in ctx.summary:
40 | ctx.summary['timer'] = {config : [dateinfo]}
41 | else:
42 | if config in ctx.summary['timer']:
43 | ctx.summary['timer'][config].append(dateinfo)
44 | else:
45 | ctx.summary['timer'][config] = [dateinfo]
46 |         log.info('Elapsed time for %s -- %s' % (config, elapsedtime))
47 |
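The shape this leaves in ctx.summary, with invented timestamps for illustration (each entry is a (date, elapsed) pair keyed by the task's config string):

    summary = {
        'timer': {
            'fsx run': [
                ('2025-01-01 12:00:00.000000', '00:01:23.000456'),
                ('2025-01-01 12:10:00.000000', '00:00:59.000123'),
            ],
        },
    }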
--------------------------------------------------------------------------------
/teuthology/provision/cloud/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from teuthology.config import config
4 |
5 | from teuthology.provision.cloud import openstack
6 |
7 | log = logging.getLogger(__name__)
8 |
9 |
10 | supported_drivers = dict(
11 | openstack=dict(
12 | provider=openstack.OpenStackProvider,
13 | provisioner=openstack.OpenStackProvisioner,
14 | ),
15 | )
16 |
17 |
18 | def get_types():
19 | types = list()
20 | if 'libcloud' in config and 'providers' in config.libcloud:
21 | types = list(config.libcloud['providers'].keys())
22 | return types
23 |
24 |
25 | def get_provider_conf(node_type):
26 | all_providers = config.libcloud['providers']
27 | provider_conf = all_providers[node_type]
28 | return provider_conf
29 |
30 |
31 | def get_provider(node_type):
32 | provider_conf = get_provider_conf(node_type)
33 | driver = provider_conf['driver']
34 | provider_cls = supported_drivers[driver]['provider']
35 | return provider_cls(name=node_type, conf=provider_conf)
36 |
37 |
38 | def get_provisioner(node_type, name, os_type, os_version, conf=None):
39 | provider = get_provider(node_type)
40 | provider_conf = get_provider_conf(node_type)
41 | driver = provider_conf['driver']
42 | provisioner_cls = supported_drivers[driver]['provisioner']
43 | return provisioner_cls(
44 | provider=provider,
45 | name=name,
46 | os_type=os_type,
47 | os_version=os_version,
48 | conf=conf,
49 | )
50 |
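A minimal sketch of driving these factories, modeled on the dummy fixtures in test_cloud_init.py later in this tree ('my_provider' and its settings are illustrative, not a real cloud):

    from teuthology.config import config
    from teuthology.provision import cloud

    config.load()
    config.libcloud = {
        'providers': {
            'my_provider': {'driver': 'openstack', 'driver_args': {}},
        },
    }
    print(cloud.get_types())  # ['my_provider']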
--------------------------------------------------------------------------------
/teuthology/test/test_schedule.py:
--------------------------------------------------------------------------------
1 | from teuthology.schedule import build_config
2 | from teuthology.misc import get_user
3 |
4 |
5 | class TestSchedule(object):
6 | basic_args = {
7 | '--verbose': False,
8 | '--owner': 'OWNER',
9 | '--description': 'DESC',
10 | '--email': 'EMAIL',
11 | '--first-in-suite': False,
12 | '--last-in-suite': True,
13 | '--name': 'NAME',
14 | '--worker': 'tala',
15 | '--timeout': '6',
16 | '--priority': '99',
17 | # TODO: make this work regardless of $PWD
18 |         #'<conf_file>': ['../../examples/3node_ceph.yaml',
19 |         #                '../../examples/3node_rgw.yaml'],
20 | }
21 |
22 | def test_basic(self):
23 | expected = {
24 | 'description': 'DESC',
25 | 'email': 'EMAIL',
26 | 'first_in_suite': False,
27 | 'last_in_suite': True,
28 | 'machine_type': 'tala',
29 | 'name': 'NAME',
30 | 'owner': 'OWNER',
31 | 'priority': 99,
32 | 'results_timeout': '6',
33 | 'verbose': False,
34 | 'tube': 'tala',
35 | }
36 |
37 | job_dict = build_config(self.basic_args)
38 | assert job_dict == expected
39 |
40 | def test_owner(self):
41 | args = self.basic_args
42 | args['--owner'] = None
43 | job_dict = build_config(self.basic_args)
44 | assert job_dict['owner'] == 'scheduled_%s' % get_user()
45 |
46 |
--------------------------------------------------------------------------------
/scripts/prune_logs.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.config
4 | import teuthology.prune
5 |
6 | doc = """
7 | usage:
8 | teuthology-prune-logs -h
9 | teuthology-prune-logs [-v] [options]
10 |
11 | Prune old logfiles from the archive
12 |
13 | optional arguments:
14 | -h, --help Show this help message and exit
15 | -v, --verbose Be more verbose
16 | -a ARCHIVE, --archive ARCHIVE
17 | The base archive directory
18 | [default: {archive_base}]
19 | --dry-run Don't actually delete anything; just log what would be
20 | deleted
21 | -p DAYS, --pass DAYS Remove all logs for jobs which passed and are older
22 | than DAYS. Negative values will skip this operation.
23 | [default: 14]
24 | -f DAYS, --fail DAYS Like --pass, but for failed jobs. [default: -1]
25 | -r DAYS, --remotes DAYS
26 | Remove the 'remote' subdir of jobs older than DAYS.
27 | Negative values will skip this operation.
28 | [default: 60]
29 | -z DAYS, --compress DAYS
30 | Compress (using gzip) any teuthology.log files older
31 | than DAYS. Negative values will skip this operation.
32 | [default: 30]
33 | """.format(archive_base=teuthology.config.config.archive_base)
34 |
35 |
36 | def main():
37 | args = docopt.docopt(doc)
38 | teuthology.prune.main(args)
39 |
--------------------------------------------------------------------------------
/teuthology/test/test_get_distro.py:
--------------------------------------------------------------------------------
1 | from teuthology.misc import get_distro
2 |
3 |
4 | class Mock:
5 | pass
6 |
7 |
8 | class TestGetDistro(object):
9 |
10 | def setup_method(self):
11 | self.fake_ctx = Mock()
12 | self.fake_ctx.config = {}
13 | # os_type in ctx will always default to None
14 | self.fake_ctx.os_type = None
15 |
16 | def test_default_distro(self):
17 | distro = get_distro(self.fake_ctx)
18 | assert distro == 'ubuntu'
19 |
20 | def test_argument(self):
21 | # we don't want fake_ctx to have a config
22 | self.fake_ctx = Mock()
23 | self.fake_ctx.os_type = 'centos'
24 | distro = get_distro(self.fake_ctx)
25 | assert distro == 'centos'
26 |
27 | def test_teuth_config(self):
28 | self.fake_ctx.config = {'os_type': 'fedora'}
29 | distro = get_distro(self.fake_ctx)
30 | assert distro == 'fedora'
31 |
32 | def test_argument_takes_precedence(self):
33 | self.fake_ctx.config = {'os_type': 'fedora'}
34 | self.fake_ctx.os_type = "centos"
35 | distro = get_distro(self.fake_ctx)
36 | assert distro == 'centos'
37 |
38 | def test_no_config_or_os_type(self):
39 | self.fake_ctx = Mock()
40 | self.fake_ctx.os_type = None
41 | distro = get_distro(self.fake_ctx)
42 | assert distro == 'ubuntu'
43 |
44 | def test_config_os_type_is_none(self):
45 | self.fake_ctx.config["os_type"] = None
46 | distro = get_distro(self.fake_ctx)
47 | assert distro == 'ubuntu'
48 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-12.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | - SuSEfirewall2 stop
12 | preserve_hostname: true
13 | users:
14 | - name: {username}
15 | gecos: User
16 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
17 | groups: users
18 | runcmd:
19 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
22 | lsb-release make
23 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
24 | - systemctl restart ntpd.service
25 | final_message: "{up}, after $UPTIME seconds"
26 |
--------------------------------------------------------------------------------
/teuthology/task/sequential.py:
--------------------------------------------------------------------------------
1 | """
2 | Task sequencer
3 | """
4 | import sys
5 | import logging
6 |
7 | from teuthology import run_tasks
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def task(ctx, config):
13 | """
14 | Sequentialize a group of tasks into one executable block
15 |
16 | example::
17 |
18 | - sequential:
19 | - tasktest:
20 | - tasktest:
21 |
22 | You can also reference the job from elsewhere::
23 |
24 | foo:
25 | tasktest:
26 | tasks:
27 | - sequential:
28 | - tasktest:
29 | - foo
30 | - tasktest:
31 |
32 | That is, if the entry is not a dict, we will look it up in the top-level
33 | config.
34 |
35 | Sequential tasks and Parallel tasks can be nested.
36 |
37 | :param ctx: Context
38 | :param config: Configuration
39 | """
40 | stack = []
41 | try:
42 | for entry in config:
43 | if not isinstance(entry, dict):
44 | entry = ctx.config.get(entry, {})
45 | ((taskname, confg),) = entry.items()
46 | log.info('In sequential, running task %s...' % taskname)
47 | mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
48 | if hasattr(mgr, '__enter__'):
49 | mgr.__enter__()
50 | stack.append(mgr)
51 | finally:
52 | try:
53 | exc_info = sys.exc_info()
54 | while stack:
55 | mgr = stack.pop()
56 | mgr.__exit__(*exc_info)
57 | finally:
58 | del exc_info
59 |
--------------------------------------------------------------------------------
/scripts/test/test_run.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | from script import Script
4 | from scripts import run
5 |
6 | doc = run.__doc__
7 |
8 |
9 | class TestRun(Script):
10 | script_name = 'teuthology'
11 |
12 | def test_all_args(self):
13 | args = docopt.docopt(doc, [
14 | "--verbose",
15 | "--archive", "some/archive/dir",
16 | "--description", "the_description",
17 | "--owner", "the_owner",
18 | "--lock",
19 | "--machine-type", "machine_type",
20 | "--os-type", "os_type",
21 | "--os-version", "os_version",
22 | "--block",
23 | "--name", "the_name",
24 | "--suite-path", "some/suite/dir",
25 | "path/to/config.yml",
26 | ])
27 | assert args["--verbose"]
28 | assert args["--archive"] == "some/archive/dir"
29 | assert args["--description"] == "the_description"
30 | assert args["--owner"] == "the_owner"
31 | assert args["--lock"]
32 | assert args["--machine-type"] == "machine_type"
33 | assert args["--os-type"] == "os_type"
34 | assert args["--os-version"] == "os_version"
35 | assert args["--block"]
36 | assert args["--name"] == "the_name"
37 | assert args["--suite-path"] == "some/suite/dir"
38 |         assert args["<config>"] == ["path/to/config.yml"]
39 |
40 | def test_multiple_configs(self):
41 | args = docopt.docopt(doc, [
42 | "config1.yml",
43 | "config2.yml",
44 | ])
45 |         assert args["<config>"] == ["config1.yml", "config2.yml"]
46 |
--------------------------------------------------------------------------------
/teuthology/orchestra/monkey.py:
--------------------------------------------------------------------------------
1 | """
2 | Monkey patches (paramiko support)
3 | """
4 | import logging
5 |
6 | log = logging.getLogger(__name__)
7 |
8 | def patch_001_paramiko_deprecation():
9 | """
10 |     Silence an unhelpful DeprecationWarning triggered by Paramiko.
11 |
12 | Not strictly a monkeypatch.
13 | """
14 | import warnings
15 | warnings.filterwarnings(
16 | category=DeprecationWarning,
17 | message='This application uses RandomPool,',
18 | action='ignore',
19 | )
20 |
21 |
22 | def patch_100_paramiko_log():
23 | """
24 | Silence some noise paramiko likes to log.
25 |
26 | Not strictly a monkeypatch.
27 | """
28 | logging.getLogger('paramiko.transport').setLevel(logging.WARNING)
29 |
30 |
31 | def patch_100_logger_getChild():
32 | """
33 | Imitate Python 2.7 feature Logger.getChild.
34 | """
35 | import logging
36 | if not hasattr(logging.Logger, 'getChild'):
37 | def getChild(self, name):
38 | return logging.getLogger('.'.join([self.name, name]))
39 | logging.Logger.getChild = getChild
40 |
41 |
42 | def patch_100_trigger_rekey():
43 | # Fixes http://tracker.ceph.com/issues/15236
44 | from paramiko.packet import Packetizer
45 | Packetizer._trigger_rekey = lambda self: True
46 |
47 |
48 | def patch_all():
49 | """
50 | Run all the patch_* functions in this module.
51 | """
52 | monkeys = [(k, v) for (k, v) in globals().items() if k.startswith('patch_') and k != 'patch_all']
53 | monkeys.sort()
54 | for k, v in monkeys:
55 | log.debug('Patching %s', k)
56 | v()
57 |
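Typical use is a single call at startup, before any paramiko connections are made (a sketch):

    from teuthology.orchestra import monkey

    monkey.patch_all()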
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 | ARG SSH_PRIVKEY_FILE=id_ed25519
3 | ENV DEBIAN_FRONTEND=noninteractive
4 | ENV LANG=C.UTF-8
5 | RUN apt-get update && \
6 | apt-get install -y \
7 | git \
8 | jq \
9 | curl \
10 | qemu-utils \
11 | python3-dev \
12 | libssl-dev \
13 | ipmitool \
14 | python3-pip \
15 | python3-venv \
16 | vim \
17 | locales-all \
18 | libev-dev \
19 | libvirt-dev \
20 | libffi-dev \
21 | libyaml-dev \
22 | locales \
23 | lsb-release && \
24 | apt-get clean all && \
25 | locale-gen $LC_ALL
26 | WORKDIR /teuthology
27 | COPY requirements.txt requirements.yml ansible.cfg bootstrap /teuthology/
28 | RUN \
29 | cd /teuthology && \
30 | mkdir ../archive_dir && \
31 | mkdir log && \
32 | chmod +x /teuthology/bootstrap && \
33 | PIP_INSTALL_FLAGS="-r requirements.txt" ./bootstrap
34 | COPY . /teuthology
35 | RUN \
36 | ./bootstrap
37 | COPY docs/docker-compose/teuthology/containerized_node.yaml /teuthology
38 | COPY docs/docker-compose/teuthology/.teuthology.yaml /root
39 | COPY docs/docker-compose/teuthology/teuthology.sh /
40 | RUN mkdir -p /etc/ansible
41 | COPY docs/docker-compose/teuthology/ansible_inventory/hosts /etc/ansible/
42 | COPY docs/docker-compose/teuthology/ansible_inventory/secrets /etc/ansible/
43 | RUN \
44 | mkdir $HOME/.ssh && \
45 | touch $HOME/.ssh/${SSH_PRIVKEY_FILE} && \
46 | chmod 600 $HOME/.ssh/${SSH_PRIVKEY_FILE} && \
47 | echo "StrictHostKeyChecking=no" > $HOME/.ssh/config && \
48 | echo "UserKnownHostsFile=/dev/null" >> $HOME/.ssh/config
49 | ENTRYPOINT /teuthology.sh
50 |
--------------------------------------------------------------------------------
/teuthology/test/test_safepath.py:
--------------------------------------------------------------------------------
1 | from teuthology import safepath
2 |
3 | class TestSafepath(object):
4 | def test_simple(self):
5 | got = safepath.munge('foo')
6 | assert got == 'foo'
7 |
8 | def test_empty(self):
9 | # really odd corner case
10 | got = safepath.munge('')
11 | assert got == '_'
12 |
13 | def test_slash(self):
14 | got = safepath.munge('/')
15 | assert got == '_'
16 |
17 | def test_slashslash(self):
18 | got = safepath.munge('//')
19 | assert got == '_'
20 |
21 | def test_absolute(self):
22 | got = safepath.munge('/evil')
23 | assert got == 'evil'
24 |
25 | def test_absolute_subdir(self):
26 | got = safepath.munge('/evil/here')
27 | assert got == 'evil/here'
28 |
29 | def test_dot_leading(self):
30 | got = safepath.munge('./foo')
31 | assert got == 'foo'
32 |
33 | def test_dot_middle(self):
34 | got = safepath.munge('evil/./foo')
35 | assert got == 'evil/foo'
36 |
37 | def test_dot_trailing(self):
38 | got = safepath.munge('evil/foo/.')
39 | assert got == 'evil/foo'
40 |
41 | def test_dotdot(self):
42 | got = safepath.munge('../evil/foo')
43 | assert got == '_./evil/foo'
44 |
45 | def test_dotdot_subdir(self):
46 | got = safepath.munge('evil/../foo')
47 | assert got == 'evil/_./foo'
48 |
49 | def test_hidden(self):
50 | got = safepath.munge('.evil')
51 | assert got == '_evil'
52 |
53 | def test_hidden_subdir(self):
54 | got = safepath.munge('foo/.evil')
55 | assert got == 'foo/_evil'
56 |
--------------------------------------------------------------------------------
/scripts/run.py:
--------------------------------------------------------------------------------
1 | """
2 | usage: teuthology --help
3 | teuthology --version
4 |        teuthology [options] [--] <config>...
5 |
6 | Run ceph integration tests
7 |
8 | positional arguments:
9 |   <config>              one or more config files to read
10 |
11 | optional arguments:
12 | -h, --help show this help message and exit
13 | -v, --verbose be more verbose
14 | --version the current installed version of teuthology
15 | -a DIR, --archive DIR path to archive results in
16 | --description DESCRIPTION job description
17 | --owner OWNER job owner
18 | --lock lock machines for the duration of the run
19 | --machine-type MACHINE_TYPE Type of machine to lock/run tests on.
20 | --os-type OS_TYPE Distro/OS of machine to run test on.
21 | --os-version OS_VERSION Distro/OS version of machine to run test on.
22 | --block block until locking machines succeeds (use with --lock)
23 | --name NAME name for this teuthology run
24 | --suite-path SUITE_PATH Location of ceph-qa-suite on disk. If not specified,
25 | it will be fetched
26 | --interactive-on-error drop to a python shell on failure, which will
27 | halt the job; developer can then ssh to targets
28 | and examine cluster state.
29 |
30 | """
31 | import docopt
32 |
33 | import teuthology.run
34 |
35 |
36 | def main():
37 | args = docopt.docopt(__doc__, version=teuthology.__version__)
38 | teuthology.run.main(args)
39 |
--------------------------------------------------------------------------------
/.github/workflows/dependencies.yml:
--------------------------------------------------------------------------------
1 | name: dependencies
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | upgrade:
11 | name: Test dependencies
12 | runs-on: ${{ matrix.os }}
13 | strategy:
14 | matrix:
15 | include:
16 | - os: ubuntu-22.04
17 | python: "3.10"
18 | - os: ubuntu-22.04
19 | python: "3.11"
20 | steps:
21 | - name: Set up Python
22 | uses: actions/setup-python@v5
23 | with:
24 | python-version: ${{ matrix.python }}
25 | - name: Checkout default branch
26 | uses: actions/checkout@v4
27 | with:
28 | ref: main
29 | path: teuthology
30 | - name: virtualenv
31 | run: |
32 | pip install --user virtualenv
33 | virtualenv ./virtualenv
34 | cd ./virtualenv/lib/python*
35 | touch no-global-site-packages.txt
36 | working-directory: ./teuthology
37 | - name: Refresh system repos
38 | run: |
39 | sudo apt update -y
40 | sudo apt upgrade -y
41 | - name: Initial bootstrap
42 | run: ./bootstrap install
43 | working-directory: ./teuthology
44 | - name: Move initial repository
45 | run: mv teuthology teuthology.orig
46 | - name: Checkout desired ref
47 | uses: actions/checkout@v4
48 | with:
49 | path: teuthology
50 | - name: Move virtualenv to new checkout
51 | run: mv ./teuthology.orig/virtualenv ./teuthology/
52 | - name: Re-run bootstrap
53 | run: ./bootstrap install
54 | working-directory: ./teuthology
55 |
--------------------------------------------------------------------------------
/teuthology/provision/test/test_init_provision.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | from pytest import raises
3 | from teuthology.config import config
4 |
5 | import teuthology.provision
6 |
7 | test_config = dict(
8 | pelagos=dict(
9 | endpoint='http://pelagos.example:5000/',
10 | machine_types='ptype1,ptype2,common_type',
11 | ),
12 | fog=dict(
13 | endpoint='http://fog.example.com/fog',
14 | api_token='API_TOKEN',
15 | user_token='USER_TOKEN',
16 | machine_types='ftype1,ftype2,common_type',
17 | )
18 | )
19 |
20 | class TestInitProvision(object):
21 |
22 | def setup_method(self):
23 | config.load(deepcopy(test_config))
24 |
25 | def test_get_reimage_types(self):
26 | reimage_types = teuthology.provision.get_reimage_types()
27 | assert reimage_types == ["ptype1", "ptype2", "common_type",
28 | "ftype1", "ftype2", "common_type"]
29 |
30 | def test_reimage(self):
31 | class context:
32 | pass
33 | ctx = context()
34 | ctx.os_type = 'sle'
35 | ctx.os_version = '15.1'
36 | with raises(Exception) as e_info:
37 | teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'not-defined-type')
38 | e_str = str(e_info)
39 | print("Caught exception: " + e_str)
40 | assert e_str.find(r"configured\sprovisioners") == -1
41 |
42 | with raises(Exception) as e_info:
43 | teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'common_type')
44 | e_str = str(e_info)
45 | print("Caught exception: " + e_str)
46 | assert e_str.find(r"used\swith\sone\sprovisioner\sonly") == -1
47 |
--------------------------------------------------------------------------------
/teuthology/task/tasktest.py:
--------------------------------------------------------------------------------
1 | """
2 | Parallel and sequential task tester. Not used by any ceph tests, but used to
3 | unit test the parallel and sequential tasks
4 | """
5 | import logging
6 | import contextlib
7 | import time
8 |
9 | log = logging.getLogger(__name__)
10 |
11 | @contextlib.contextmanager
12 | def task(ctx, config):
13 | """
14 |     Task that just displays information when it is created and when it is
15 | destroyed/cleaned up. This task was used to test parallel and
16 | sequential task options.
17 |
18 | example::
19 |
20 | tasks:
21 | - sequential:
22 | - tasktest:
23 | - id: 'foo'
24 | - tasktest:
25 | - id: 'bar'
26 | - delay:5
27 | - tasktest:
28 |
29 | The above yaml will sequentially start a test task named foo and a test
30 | task named bar. Bar will take 5 seconds to complete. After foo and bar
31 | have finished, an unidentified tasktest task will run.
32 | """
33 | try:
34 | delay = config.get('delay', 0)
35 | id = config.get('id', 'UNKNOWN')
36 | except AttributeError:
37 | delay = 0
38 | id = 'UNKNOWN'
39 | try:
40 | log.info('**************************************************')
41 | log.info('Started task test -- %s' % id)
42 | log.info('**************************************************')
43 | time.sleep(delay)
44 | yield
45 |
46 | finally:
47 | log.info('**************************************************')
48 | log.info('Task test is being cleaned up -- %s' % id)
49 | log.info('**************************************************')
50 |
51 |
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/teuthology.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | set -e
3 | # We don't want -x yet, in case the private key is sensitive
4 | if [ -n "$SSH_PRIVKEY_FILE" ]; then
5 | echo "$SSH_PRIVKEY" > $HOME/.ssh/$SSH_PRIVKEY_FILE
6 | fi
7 | source /teuthology/virtualenv/bin/activate
8 | set -x
9 | if [ -n "$TESTNODES" ]; then
10 | for node in $(echo $TESTNODES | tr , ' '); do
11 | teuthology-update-inventory -m $MACHINE_TYPE $node
12 | done
13 | CUSTOM_CONF=${CUSTOM_CONF:-}
14 | else
15 | CUSTOM_CONF=/teuthology/containerized_node.yaml
16 | fi
17 | export MACHINE_TYPE=${MACHINE_TYPE:-testnode}
18 | if [ -z "$TEUTHOLOGY_WAIT" ]; then
19 | if [ -n "$TEUTH_BRANCH" ]; then
20 | TEUTH_BRANCH_FLAG="--teuthology-branch $TEUTH_BRANCH"
21 | fi
22 | teuthology-suite -v \
23 | $TEUTH_BRANCH_FLAG \
24 | --ceph-repo https://github.com/ceph/ceph.git \
25 | --suite-repo https://github.com/ceph/ceph.git \
26 | -c main \
27 | -m $MACHINE_TYPE \
28 | --limit 1 \
29 | -n 100 \
30 | --suite teuthology:no-ceph \
31 | --filter-out "libcephfs,kclient,stream,centos,rhel" \
32 | -d ubuntu -D 22.04 \
33 | --suite-branch main \
34 | --subset 9000/100000 \
35 | -p 75 \
36 | --seed 349 \
37 | --force-priority \
38 | $CUSTOM_CONF
39 | DISPATCHER_EXIT_FLAG='--exit-on-empty-queue'
40 | teuthology-queue -m $MACHINE_TYPE -s | \
41 | python3 -c "import sys, json; assert json.loads(sys.stdin.read())['count'] > 0, 'queue is empty!'"
42 | fi
43 | teuthology-dispatcher -v \
44 | --log-dir /teuthology/log \
45 | --tube $MACHINE_TYPE \
46 | $DISPATCHER_EXIT_FLAG
47 |
--------------------------------------------------------------------------------
/teuthology/orchestra/test/test_systemd.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | from logging import debug
5 | from teuthology import misc
6 | from teuthology.orchestra import cluster
7 | from teuthology.orchestra.run import quote
8 | from teuthology.orchestra.daemon.group import DaemonGroup
9 | import subprocess
10 |
11 |
12 | class FakeRemote(object):
13 | pass
14 |
15 |
16 | def test_pid():
17 | ctx = argparse.Namespace()
18 | ctx.daemons = DaemonGroup(use_systemd=True)
19 | remote = FakeRemote()
20 |
21 | ps_ef_output_path = os.path.join(
22 | os.path.dirname(__file__),
23 | "files/daemon-systemdstate-pid-ps-ef.output"
24 | )
25 |
26 | # patching ps -ef command output using a file
27 | def sh(args):
28 | args[0:2] = ["cat", ps_ef_output_path]
29 | debug(args)
30 | return subprocess.getoutput(quote(args))
31 |
32 | remote.sh = sh
33 | remote.init_system = 'systemd'
34 | remote.shortname = 'host1'
35 |
36 | ctx.cluster = cluster.Cluster(
37 | remotes=[
38 | (remote, ['rgw.0', 'mon.a', 'mgr.a', 'mds.a', 'osd.0'])
39 | ],
40 | )
41 |
42 | for remote, roles in ctx.cluster.remotes.items():
43 | for role in roles:
44 | _, rol, id_ = misc.split_role(role)
45 | if any(rol.startswith(x) for x in ['mon', 'mgr', 'mds']):
46 | ctx.daemons.register_daemon(remote, rol, remote.shortname)
47 | else:
48 | ctx.daemons.register_daemon(remote, rol, id_)
49 |
50 | for _, daemons in ctx.daemons.daemons.items():
51 | for daemon in daemons.values():
52 | pid = daemon.pid
53 | debug(pid)
54 | assert pid
55 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-12.3-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | - SuSEfirewall2 stop
12 | preserve_hostname: true
13 | users:
14 | - name: {username}
15 | gecos: User
16 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
17 | groups: users
18 | runcmd:
19 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
20 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
21 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
22 | - systemctl enable salt-minion.service ntpd.service
23 | - systemctl restart ntpd.service
24 | final_message: "{up}, after $UPTIME seconds"
25 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - zypper --non-interactive --no-gpg-checks refresh
20 | - zypper --non-interactive install --no-recommends wget rsyslog lsb-release make gcc gcc-c++ chrony
21 | - sed -i -e 's/^! pool/pool/' /etc/chrony.conf
22 | - systemctl enable chronyd.service
23 | - systemctl start chronyd.service
24 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
25 | final_message: "{up}, after $UPTIME seconds"
26 |
--------------------------------------------------------------------------------
/teuthology/test/test_ls.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from unittest.mock import patch, Mock
4 |
5 | from teuthology import ls
6 |
7 |
8 | class TestLs(object):
9 | """ Tests for teuthology.ls """
10 |
11 | @patch('os.path.isdir')
12 | @patch('os.listdir')
13 | def test_get_jobs(self, m_listdir, m_isdir):
14 | m_listdir.return_value = ["1", "a", "3"]
15 | m_isdir.return_value = True
16 | results = ls.get_jobs("some/archive/dir")
17 | assert results == ["1", "3"]
18 |
19 | @patch("yaml.safe_load_all")
20 | @patch("teuthology.ls.get_jobs")
21 | def test_ls(self, m_get_jobs, m_safe_load_all):
22 | m_get_jobs.return_value = ["1", "2"]
23 | m_safe_load_all.return_value = [{"failure_reason": "reasons"}]
24 |         ls.ls("some/archive/dir", True)
25 |
26 | @patch("teuthology.ls.open")
27 | @patch("teuthology.ls.get_jobs")
28 | def test_ls_ioerror(self, m_get_jobs, m_open):
29 | m_get_jobs.return_value = ["1", "2"]
30 | m_open.side_effect = IOError()
31 | with pytest.raises(IOError):
32 | ls.ls("some/archive/dir", True)
33 |
34 | @patch("teuthology.ls.open")
35 | @patch("os.popen")
36 | @patch("os.path.isdir")
37 | @patch("os.path.isfile")
38 | def test_print_debug_info(self, m_isfile, m_isdir, m_popen, m_open):
39 | m_isfile.return_value = True
40 | m_isdir.return_value = True
41 | m_popen.return_value = Mock()
42 | cmdline = Mock()
43 | cmdline.find = Mock(return_value=0)
44 | m1 = Mock()
45 | m2 = Mock()
46 | m2.read = Mock(return_value=cmdline)
47 | m_open.side_effect = [m1, m2]
48 | ls.print_debug_info("the_job", "job/dir", "some/archive/dir")
49 |
--------------------------------------------------------------------------------
/teuthology/task/full_sequential_finally.py:
--------------------------------------------------------------------------------
1 | """
2 | Task sequencer finally
3 | """
4 | import sys
5 | import logging
6 | import contextlib
7 |
8 | from teuthology import run_tasks
9 |
10 | log = logging.getLogger(__name__)
11 |
12 |
13 | @contextlib.contextmanager
14 | def task(ctx, config):
15 | """
16 | Sequentialize a group of tasks into one executable block, run on cleanup
17 |
18 | example::
19 |
20 | tasks:
21 | - foo:
22 | - full_sequential_finally:
23 | - final1:
24 | - final2:
25 | - bar:
26 | - baz:
27 |
28 |     The final1 and final2 tasks will run when full_sequential_finally is torn
29 | down, after the nested bar and baz tasks have run to completion, and right
30 | before the preceding foo task is torn down. This is useful if there are
31 | additional steps you want to interject in a job during the shutdown (instead
32 | of startup) phase.
33 |
34 | :param ctx: Context
35 | :param config: Configuration
36 | """
37 | try:
38 | yield
39 | finally:
40 | for entry in config:
41 | if not isinstance(entry, dict):
42 | entry = ctx.config.get(entry, {})
43 | ((taskname, confg),) = entry.items()
44 | log.info('In full_sequential_finally, running task %s...' % taskname)
45 | mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
46 | if hasattr(mgr, '__enter__'):
47 | try:
48 | mgr.__enter__()
49 | finally:
50 | try:
51 | exc_info = sys.exc_info()
52 | mgr.__exit__(*exc_info)
53 | finally:
54 | del exc_info
55 |
--------------------------------------------------------------------------------
/teuthology/util/sentry.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sentry_sdk
3 |
4 | from copy import deepcopy
5 |
6 | from teuthology.config import config as teuth_config
7 | from teuthology.misc import get_http_log_path
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def report_error(job_config, exception, task_name=None):
13 | if not teuth_config.sentry_dsn:
14 | return None
15 | sentry_sdk.init(teuth_config.sentry_dsn)
16 | job_config = deepcopy(job_config)
17 |
18 | tags = {
19 | 'task': task_name,
20 | 'owner': job_config.get("owner"),
21 | }
22 | optional_tags = ('teuthology_branch', 'branch', 'suite',
23 | 'machine_type', 'os_type', 'os_version')
24 | for tag in optional_tags:
25 | if tag in job_config:
26 | tags[tag] = job_config[tag]
27 |
28 | # Remove ssh keys from reported config
29 | if 'targets' in job_config:
30 | targets = job_config['targets']
31 | for host in targets.keys():
32 | targets[host] = ''
33 |
34 | job_id = job_config.get('job_id')
35 | archive_path = job_config.get('archive_path')
36 | extras = dict(config=job_config)
37 | if job_id:
38 | extras['logs'] = get_http_log_path(archive_path, job_id)
39 |
40 | fingerprint = exception.fingerprint() if hasattr(exception, 'fingerprint') else None
41 | exc_id = sentry_sdk.capture_exception(
42 | error=exception,
43 | tags=tags,
44 | extras=extras,
45 | fingerprint=fingerprint,
46 | )
47 | event_url = "{server}/?query={id}".format(
48 | server=teuth_config.sentry_server.strip('/'), id=exc_id)
49 | log.exception(" Sentry event: %s" % event_url)
50 | return event_url
51 |
52 |
53 |
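report_error() is deliberately a no-op when no DSN is configured, so callers can invoke it unconditionally; a minimal sketch:

    from teuthology.config import config
    from teuthology.util import sentry

    config.sentry_dsn = None
    assert sentry.report_error({'owner': 'me'}, RuntimeError('boom')) is None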
--------------------------------------------------------------------------------
/containers/teuthology-dev/teuthology.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | set -e
3 | source /teuthology/virtualenv/bin/activate
4 | set -x
5 | cat /run/secrets/id_rsa > $HOME/.ssh/id_rsa
6 | if [ -n "$TEUTHOLOGY_TESTNODES" ]; then
7 | for node in $(echo $TEUTHOLOGY_TESTNODES | tr , ' '); do
8 | teuthology-update-inventory -m "$TEUTHOLOGY_MACHINE_TYPE" "$node"
9 | done
10 | TEUTHOLOGY_CONF=${TEUTHOLOGY_CONF:-}
11 | else
12 | TEUTHOLOGY_CONF=/teuthology/containerized_node.yaml
13 | fi
14 | export TEUTHOLOGY_MACHINE_TYPE=${TEUTHOLOGY_MACHINE_TYPE:-testnode}
15 | if [ "$TEUTHOLOGY_SUITE" != "none" ]; then
16 | if [ -n "$TEUTHOLOGY_BRANCH" ]; then
17 | TEUTH_BRANCH_FLAG="--teuthology-branch $TEUTHOLOGY_BRANCH"
18 | fi
19 | teuthology-suite -v \
20 | $TEUTH_BRANCH_FLAG \
21 | -m "$TEUTHOLOGY_MACHINE_TYPE" \
22 | --newest 100 \
23 | --ceph "${TEUTHOLOGY_CEPH_BRANCH:-main}" \
24 | --ceph-repo "${TEUTHOLOGY_CEPH_REPO:-https://github.com/ceph/ceph.git}" \
25 | --suite "${TEUTHOLOGY_SUITE:-teuthology:no-ceph}" \
26 | --suite-branch "${TEUTHOLOGY_SUITE_BRANCH:-main}" \
27 | --suite-repo "${TEUTHOLOGY_SUITE_REPO:-https://github.com/ceph/ceph.git}" \
28 | --filter-out "libcephfs,kclient" \
29 | --force-priority \
30 | --seed 349 \
31 | ${TEUTHOLOGY_SUITE_EXTRA_ARGS} \
32 | $TEUTHOLOGY_CONF
33 | DISPATCHER_EXIT_FLAG='--exit-on-empty-queue'
34 | teuthology-queue -m $TEUTHOLOGY_MACHINE_TYPE -s | \
35 | python3 -c "import sys, json; assert json.loads(sys.stdin.read())['count'] > 0, 'queue is empty!'"
36 | fi
37 | teuthology-dispatcher -v \
38 | --log-dir /teuthology/log \
39 | --tube "$TEUTHOLOGY_MACHINE_TYPE" \
40 | $DISPATCHER_EXIT_FLAG
41 |
--------------------------------------------------------------------------------
/teuthology/openstack/archive-key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEowIBAAKCAQEAvLz+sao32JL/yMgwTFDTnQVZK3jyXlhQJpHLsgwgHWHQ/27L
3 | fwEbGFVYsJNBGntZwCZvH/K4c0IevbnX/Y69qgmAc9ZpZQLIcIF0A8hmwVYRU+Ap
4 | TAK2qAvadThWfiRBA6+SGoRy6VV5MWeq+hqlGf9axRKqhECNhHuGBuBeosUOZOOH
5 | NVzvFIbp/4842yYrZUDnDzW7JX2kYGi6kaEAYeR8qYJgT/95Pm4Bgu1V7MI36rx1
6 | O/5BSPF3LvDSnnaZyHCDZtwzC50lBnS2nx8kKPmmdKBSEJoTdNRPIXZ/lMq5pzIW
7 | QPDjI8O5pbX1BJcxfFlZ/h+bI6u8IX3vfTGHWwIDAQABAoIBAG5yLp0rHfkXtKT7
8 | OQA/wEW/znmZEkPRbD3VzZyIafanuhTv8heFPyTTNM5Hra5ghpniI99PO07/X1vp
9 | OBMCB81MOCYRT6WzpjXoG0rnZ/I1enhZ0fDQGbFnFlTIPh0c/Aq7IEVyQoh24y/d
10 | GXm4Q+tdufFfRfeUivv/CORXQin/Iugbklj8erjx+fdVKPUXilmDIEVleUncer5/
11 | K5Fxy0lWbm6ZX1fE+rfJvCwNjAaIJgrN8TWUTE8G72F9Y0YU9hRtqOZe6MMbSufy
12 | 5+/yj2Vgp+B8Id7Ass2ylDQKsjBett/M2bNKt/DUVIiaxKi0usNSerLvtbkWEw9s
13 | tgUI6ukCgYEA6qqnZwkbgV0lpj1MrQ3BRnFxNR42z2MyEY5xRGaYp22ByxS207z8
14 | mM3EuLH8k2u6jzsGoPpBWhBbs97MuGDHwsMEO5rBpytnTE4Hxrgec/13Arzk4Bme
15 | eqg1Ji+lNkoLzEHkuihskcZwnQ8uaOdqrnH/NRGuUhA9hjeh+lQzBy8CgYEAzeV1
16 | zYsw8xIBFtbmFhBQ8imHr0SQalTiQU2Qn46LORK0worsf4sZV5ZF3VBRdnCUwwbm
17 | 0XaMb3kE2UBlU8qPqLgxXPNjcEKuqtVlp76dT/lrXIhYUq+Famrf20Lm01kC5itz
18 | QF247hnUfo2uzxpatuEr2ggs2NjuODn57tVw95UCgYEAv0s+C5AxC9OSzWFLEAcW
19 | dwYi8toedBC4z/b9/nRkHJf4JkRMhW6ZuzaCFs2Ax+wZuIi1bqSSgYi0OHx3BhZe
20 | wTWYTb5p/owzONCjJisRKByG14SETuqTdgmIyggs9YSG+Yr9mYM6fdr2EhI+EuYS
21 | 4QGsuOYg5GS4wqC3OglJT6ECgYA8y28QRPQsIXnO259OjnzINDkLKGyX6P5xl8yH
22 | QFidfod/FfQk6NaPxSBV67xSA4X5XBVVbfKji5FB8MC6kAoBIHn63ybSY+4dJSuB
23 | 70eV8KihxuSFbawwMuRsYoGzkAnKGrRKIiJTs67Ju14NatO0QiJnm5haYxtb4MqK
24 | md1kTQKBgDmTxtSBVOV8eMhl076OoOvdnpb3sy/obI/XUvurS0CaAcqmkVSNJ6c+
25 | g1O041ocTbuW5d3fbzo9Jyle6qsvUQd7fuoUfAMrd0inKsuYPPM0IZOExbt8QqLI
26 | KFJ+r/nQYoJkmiNO8PssxcP3CMFB6TpUx0BgFcrhH//TtKKNrGTl
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/teuthology/test/test_get_distro_version.py:
--------------------------------------------------------------------------------
1 | from teuthology.misc import get_distro_version
2 |
3 |
4 | class Mock:
5 | pass
6 |
7 |
8 | class TestGetDistroVersion(object):
9 |
10 | def setup_method(self):
11 | self.fake_ctx = Mock()
12 | self.fake_ctx.config = {}
13 | self.fake_ctx_noarg = Mock()
14 | self.fake_ctx_noarg.config = {}
15 | self.fake_ctx_noarg.os_version = None
16 | self.fake_ctx.os_type = None
17 | self.fake_ctx_noarg.os_type = None
18 |
19 | def test_default_distro_version(self):
20 |         # Default distro is ubuntu, default version of ubuntu is 22.04
21 | self.fake_ctx.os_version = None
22 | distroversion = get_distro_version(self.fake_ctx)
23 | assert distroversion == '22.04'
24 |
25 | def test_argument_version(self):
26 | self.fake_ctx.os_version = '13.04'
27 | distroversion = get_distro_version(self.fake_ctx)
28 | assert distroversion == '13.04'
29 |
30 | def test_teuth_config_version(self):
31 |         # Argument takes precedence.
32 | self.fake_ctx.os_version = '13.04'
33 | self.fake_ctx.config = {'os_version': '13.10'}
34 | distroversion = get_distro_version(self.fake_ctx)
35 | assert distroversion == '13.04'
36 |
37 | def test_teuth_config_noarg_version(self):
38 | self.fake_ctx_noarg.config = {'os_version': '13.04'}
39 | distroversion = get_distro_version(self.fake_ctx_noarg)
40 | assert distroversion == '13.04'
41 |
42 | def test_no_teuth_config(self):
43 | self.fake_ctx = Mock()
44 | self.fake_ctx.os_type = None
45 | self.fake_ctx.os_version = '13.04'
46 | distroversion = get_distro_version(self.fake_ctx)
47 | assert distroversion == '13.04'
48 |
--------------------------------------------------------------------------------
/teuthology/util/time.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from datetime import datetime, timedelta, timezone
4 |
5 | # When we're not using ISO format, we're using this
6 | TIMESTAMP_FMT = "%Y-%m-%d_%H:%M:%S"
7 |
8 | def parse_timestamp(timestamp: str) -> datetime:
9 | """
10 | timestamp: A string either in ISO 8601 format or TIMESTAMP_FMT.
11 | If no timezone is specified, UTC is assumed.
12 |
13 | :returns: a datetime object
14 | """
15 | try:
16 | dt = datetime.fromisoformat(timestamp)
17 | except ValueError:
18 | dt = datetime.strptime(timestamp, TIMESTAMP_FMT)
19 | if dt.tzinfo is None:
20 | dt = dt.replace(tzinfo=timezone.utc)
21 | return dt
22 |
23 | def parse_offset(offset: str) -> timedelta:
24 | """
25 | offset: A string consisting of digits followed by one of the following
26 | characters:
27 | s: seconds
28 | m: minutes
29 | h: hours
30 | d: days
31 | w: weeks
32 | """
33 | err_msg = "Offsets must either be an ISO 8601-formatted timestamp or " \
34 | f"a relative value like '2w', '1d', '7h', '45m', '90s'. Got: {offset}"
35 | match = re.match(r'(\d+)(s|m|h|d|w)$', offset)
36 | if match is None:
37 | raise ValueError(err_msg)
38 | num = int(match.groups()[0])
39 | unit = match.groups()[1]
40 | match unit:
41 | case 's':
42 | return timedelta(seconds=num)
43 | case 'm':
44 | return timedelta(minutes=num)
45 | case 'h':
46 | return timedelta(hours=num)
47 | case 'd':
48 | return timedelta(days=num)
49 | case 'w':
50 | return timedelta(weeks=num)
51 | case _:
52 | raise ValueError(err_msg)
53 |
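A quick illustration of both helpers (values chosen arbitrarily):

    from datetime import timedelta, timezone

    from teuthology.util.time import parse_offset, parse_timestamp

    dt = parse_timestamp('2025-03-06_18:47:26')  # TIMESTAMP_FMT; UTC assumed
    assert dt.tzinfo == timezone.utc
    assert parse_offset('2w') == timedelta(weeks=2)
    assert parse_offset('90s') == timedelta(seconds=90)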
--------------------------------------------------------------------------------
/teuthology/util/version.py:
--------------------------------------------------------------------------------
1 | import re
2 | from functools import total_ordering
3 | from typing import Union, List
4 |
5 |
6 | @total_ordering
7 | class LooseVersion:
8 | """
9 | A flexible version comparison class that handles arbitrary version strings.
10 | Compares numeric components numerically and alphabetic components lexically.
11 | """
12 |
13 | _component_re = re.compile(r'(\d+|[a-z]+|\.)', re.IGNORECASE)
14 |
15 | def __init__(self, vstring: str):
16 | self.vstring = str(vstring)
17 | self.version = self._parse(self.vstring)
18 |
19 | def _parse(self, vstring: str) -> List[Union[int, str]]:
20 | """Parse version string into comparable components."""
21 | components = []
22 | for match in self._component_re.finditer(vstring.lower()):
23 | component = match.group()
24 | if component != '.':
25 | # Try to convert to int, fall back to string
26 | try:
27 | components.append(int(component))
28 | except ValueError:
29 | components.append(component)
30 | return components
31 |
32 | def __str__(self) -> str:
33 | return self.vstring
34 |
35 | def __repr__(self) -> str:
36 | return f"{self.__class__.__name__}('{self.vstring}')"
37 |
38 | def __eq__(self, other) -> bool:
39 | if not isinstance(other, LooseVersion):
40 | other = LooseVersion(str(other))
41 | return self.version == other.version
42 |
43 | def __lt__(self, other) -> bool:
44 | if not isinstance(other, LooseVersion):
45 | other = LooseVersion(str(other))
46 | return self.version < other.version
47 |
48 | def __hash__(self) -> int:
49 | return hash(tuple(self.version))
50 |
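LooseVersion compares numeric components numerically, so '1.10' sorts after '1.2' where a plain string comparison would not:

    from teuthology.util.version import LooseVersion

    assert LooseVersion('1.2') < LooseVersion('1.10')
    assert LooseVersion('1.0rc1') == LooseVersion('1.0RC1')  # parsing is case-insensitive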
--------------------------------------------------------------------------------
/scripts/kill.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.config
4 | import teuthology.kill
5 |
6 | doc = """
7 | usage: teuthology-kill -h
8 | teuthology-kill [-a ARCHIVE] [-p] -r RUN
9 | teuthology-kill [-a ARCHIVE] [-p] -m MACHINE_TYPE -r RUN
10 | teuthology-kill [-a ARCHIVE] [-o OWNER] -r RUN -j JOB ...
11 | teuthology-kill [-a ARCHIVE] [-o OWNER] -J JOBSPEC
12 | teuthology-kill [-p] -o OWNER -m MACHINE_TYPE -r RUN
13 |
14 | Kill running teuthology jobs:
15 | 1. Removes any queued jobs from the beanstalk queue
16 | 2. Kills any running jobs
17 | 3. Nukes any machines involved
18 |
19 | NOTE: Must be run on the same machine that is executing the teuthology job
20 | processes.
21 |
22 | optional arguments:
23 | -h, --help show this help message and exit
24 | -a ARCHIVE, --archive ARCHIVE
25 | The base archive directory
26 | [default: {archive_base}]
27 | -p, --preserve-queue Preserve the queue - do not delete queued jobs
28 | -r, --run RUN The name(s) of the run(s) to kill
29 | -j, --job JOB The job_id of the job to kill
30 | -J, --jobspec JOBSPEC
31 | The 'jobspec' of the job to kill. A jobspec consists of
32 | both the name of the run and the job_id, separated by a
33 | '/'. e.g. 'my-test-run/1234'
34 | -o, --owner OWNER The owner of the job(s)
35 | -m, --machine-type MACHINE_TYPE
36 | The type of machine the job(s) are running on.
37 | This is required if killing a job that is still
38 | entirely in the queue.
39 | """.format(archive_base=teuthology.config.config.archive_base)
40 |
41 |
42 | def main():
43 | args = docopt.docopt(doc)
44 | teuthology.kill.main(args)
45 |
--------------------------------------------------------------------------------
/teuthology/provision/cloud/test/test_cloud_init.py:
--------------------------------------------------------------------------------
1 | from teuthology.config import config
2 | from teuthology.provision import cloud
3 |
4 | dummy_config = dict(
5 | providers=dict(
6 | my_provider=dict(
7 | driver='dummy',
8 | driver_args=dict(
9 | creds=0,
10 | ),
11 | conf_1='1',
12 | conf_2='2',
13 | )
14 | )
15 | )
16 |
17 |
18 | class DummyProvider(cloud.base.Provider):
19 | # For libcloud's dummy driver
20 | _driver_posargs = ['creds']
21 |
22 | dummy_drivers = dict(
23 | provider=DummyProvider,
24 | provisioner=cloud.base.Provisioner,
25 | )
26 |
27 |
28 | class TestInit(object):
29 | def setup_method(self):
30 | config.load()
31 | config.libcloud = dummy_config
32 | cloud.supported_drivers['dummy'] = dummy_drivers
33 |
34 | def teardown_method(self):
35 | del cloud.supported_drivers['dummy']
36 |
37 | def test_get_types(self):
38 | assert list(cloud.get_types()) == ['my_provider']
39 |
40 | def test_get_provider_conf(self):
41 | expected = dummy_config['providers']['my_provider']
42 | assert cloud.get_provider_conf('my_provider') == expected
43 |
44 | def test_get_provider(self):
45 | obj = cloud.get_provider('my_provider')
46 | assert obj.name == 'my_provider'
47 | assert obj.driver_name == 'dummy'
48 |
49 | def test_get_provisioner(self):
50 | obj = cloud.get_provisioner(
51 | 'my_provider',
52 | 'node_name',
53 | 'ubuntu',
54 | '16.04',
55 | dict(foo='bar'),
56 | )
57 | assert obj.provider.name == 'my_provider'
58 | assert obj.name == 'node_name'
59 | assert obj.os_type == 'ubuntu'
60 | assert obj.os_version == '16.04'
61 |
--------------------------------------------------------------------------------
/docs/docker-compose/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | export TEUTHOLOGY_BRANCH=${TEUTHOLOGY_BRANCH:-$(git branch --show-current)}
4 | export TEUTH_BRANCH=${TEUTHOLOGY_BRANCH}
5 | if [ -n "$ANSIBLE_INVENTORY_REPO" ]; then
6 | basename=$(basename "$ANSIBLE_INVENTORY_REPO" | cut -d. -f1)
7 | if [ ! -d "$basename" ]; then
8 | git clone \
9 | --depth 1 \
10 | "$ANSIBLE_INVENTORY_REPO"
11 | fi
12 | mkdir -p teuthology/ansible_inventory
13 | cp -rf "$basename"/ansible/ teuthology/ansible_inventory
14 | if [ ! -d teuthology/ansible_inventory/hosts ]; then
15 | mv -f teuthology/ansible_inventory/inventory teuthology/ansible_inventory/hosts
16 | fi
17 | fi
18 | # Make the hosts and secrets directories, so that the COPY instruction in the
19 | # Dockerfile does not cause a build failure when not using this feature.
20 | mkdir -p teuthology/ansible_inventory/hosts teuthology/ansible_inventory/secrets
21 |
22 | if [ -n "$CUSTOM_CONF" ]; then
23 | cp "$CUSTOM_CONF" teuthology/
24 | fi
25 |
26 | # Generate an SSH keypair to use if necessary
27 | if [ -z "$SSH_PRIVKEY_PATH" ]; then
28 | SSH_PRIVKEY_PATH=$(mktemp -u /tmp/teuthology-ssh-key-XXXXXX)
29 | ssh-keygen -t rsa -N '' -f "$SSH_PRIVKEY_PATH"
30 | export SSH_PRIVKEY=$(cat "$SSH_PRIVKEY_PATH")
31 | export SSH_PUBKEY=$(cat "${SSH_PRIVKEY_PATH}.pub")
32 | export SSH_PRIVKEY_FILE=id_rsa
33 | else
34 | export SSH_PRIVKEY=$(cat "$SSH_PRIVKEY_PATH")
35 | export SSH_PRIVKEY_FILE=$(basename "$SSH_PRIVKEY_PATH" | cut -d. -f1)
36 | fi
37 |
38 | if [ -z "$TEUTHOLOGY_WAIT" ]; then
39 | DC_EXIT_FLAG='--abort-on-container-exit --exit-code-from teuthology'
40 | DC_AUTO_DOWN_CMD='docker compose down'
41 | fi
42 | export TEUTHOLOGY_WAIT
43 |
44 | trap "docker compose down" SIGINT
45 | docker compose up \
46 | --build \
47 | $DC_EXIT_FLAG
48 | $DC_AUTO_DOWN_CMD
49 |
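50 | # Example invocation (repository URL and config path are illustrative):
51 | #
52 | #   ANSIBLE_INVENTORY_REPO=https://github.com/example/inventory.git \
53 | #   CUSTOM_CONF=./teuthology.yaml \
54 | #   ./start.sh
55 |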
--------------------------------------------------------------------------------
/scripts/report.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.report
4 |
5 | doc = """
6 | usage:
7 | teuthology-report -h
8 | teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] [-D] -r RUN ...
9 | teuthology-report [-v] [-s SERVER] [-a ARCHIVE] [-D] -r RUN -j JOB ...
10 | teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] --all-runs
11 |
12 | Submit test results to a web service
13 |
14 | optional arguments:
15 | -h, --help show this help message and exit
16 | -a ARCHIVE, --archive ARCHIVE
17 | The base archive directory
18 | [default: {archive_base}]
19 | -r [RUN ...], --run [RUN ...]
20 | A run (or list of runs) to submit
21 | -j [JOB ...], --job [JOB ...]
22 | A job (or list of jobs) to submit
23 | --all-runs Submit all runs in the archive
24 | -R, --refresh Re-push any runs already stored on the server. Note
25 | that this may be slow.
26 | -s SERVER, --server SERVER
27 | "The server to post results to, e.g.
28 | http://localhost:8080/ . May also be specified in
29 | ~/.teuthology.yaml as 'results_server'
30 | -n, --no-save By default, when submitting all runs, we remember the
31 | last successful submission in a file called
32 | 'last_successful_run'. Pass this flag to disable that
33 | behavior.
34 | -D, --dead Mark all given jobs (or entire runs) with status
35 | 'dead'. Implies --refresh.
36 | -v, --verbose be more verbose
37 | """.format(archive_base=teuthology.config.config.archive_base)
38 |
39 |
40 | def main():
41 | args = docopt.docopt(doc)
42 | teuthology.report.main(args)
43 |
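44 | # Example invocations (run and job names are illustrative):
45 | #
46 | #   teuthology-report -v -r my-test-run
47 | #   teuthology-report -r my-test-run -j 1234 -D
48 | #   teuthology-report --all-runs -n
49 |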
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - zypper --non-interactive --gpg-auto-import-keys refresh
20 | - zypper --non-interactive remove --force librados2 librbd1 multipath-tools-rbd qemu-block-rbd ntp
21 | - zypper --non-interactive install --no-recommends --force wget git-core rsyslog lsb-release make gcc gcc-c++ salt-master salt-minion salt-api chrony
22 | - systemctl enable chronyd.service
23 | - systemctl start chronyd.service
24 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
25 | - sleep 30
26 | final_message: "{up}, after $UPTIME seconds"
27 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-12.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | - SuSEfirewall2 stop
12 | preserve_hostname: true
13 | users:
14 | - name: {username}
15 | gecos: User
16 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
17 | groups: users
18 | runcmd:
19 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
22 | lsb-release salt-minion salt-master make gcc gcc-c++
23 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
24 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
25 | - systemctl enable salt-minion.service ntpd.service
26 | - systemctl restart ntpd.service
27 | final_message: "{up}, after $UPTIME seconds"
28 |
--------------------------------------------------------------------------------
/teuthology/task/localdir.py:
--------------------------------------------------------------------------------
1 | """
2 | Localdir
3 | """
4 | import contextlib
5 | import logging
6 | import os
7 |
8 | from teuthology import misc as teuthology
9 |
10 | log = logging.getLogger(__name__)
11 |
12 |
13 | @contextlib.contextmanager
14 | def task(ctx, config):
15 | """
16 | Create a mount dir 'client' that is just the local disk:
17 |
18 | Example that "mounts" all clients:
19 |
20 | tasks:
21 | - localdir:
22 | - interactive:
23 |
24 | Example for a specific client:
25 |
26 | tasks:
27 | - localdir: [client.2]
28 | - interactive:
29 |
30 | :param ctx: Context
31 | :param config: Configuration
32 | """
33 | log.info('Creating local mnt dirs...')
34 |
35 | testdir = teuthology.get_testdir(ctx)
36 |
37 | if config is None:
38 | config = list('client.{id}'.format(id=id_)
39 | for id_ in teuthology.all_roles_of_type(ctx.cluster,
40 | 'client'))
41 |
42 | clients = list(teuthology.get_clients(ctx=ctx, roles=config))
43 | for id_, remote in clients:
44 | mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
45 | log.info('Creating dir {remote} {mnt}...'.format(
46 | remote=remote, mnt=mnt))
47 | remote.run(
48 | args=[
49 | 'mkdir',
50 | '--',
51 | mnt,
52 | ],
53 | )
54 |
55 | try:
56 | yield
57 |
58 | finally:
59 | log.info('Removing local mnt dirs...')
60 | for id_, remote in clients:
61 | mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
62 | remote.run(
63 | args=[
64 | 'rm',
65 | '-rf',
66 | '--',
67 | mnt,
68 | ],
69 | )
70 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-42.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - zypper --non-interactive --no-gpg-checks refresh
20 | - zypper --non-interactive remove systemd-logger
21 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
22 | lsb-release salt-minion salt-master make
23 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
24 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
25 | - systemctl enable salt-minion.service ntpd.service
26 | - systemctl restart ntpd.service
27 | final_message: "{up}, after $UPTIME seconds"
28 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-42.3-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - 'zypper rr openSUSE-Leap-Cloud-Tools || :'
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive remove systemd-logger
22 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog lsb-release make gcc gcc-c++
23 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
24 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
25 | - systemctl enable ntpd.service
26 | - systemctl restart ntpd.service
27 | final_message: "{up}, after $UPTIME seconds"
28 |
--------------------------------------------------------------------------------
/scripts/dispatcher.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 |
4 | import teuthology.dispatcher.supervisor
5 |
6 | from .supervisor import parse_args as parse_supervisor_args
7 |
8 |
9 | def parse_args(argv):
10 | parser = argparse.ArgumentParser(
11 | description="Start a dispatcher for the specified tube. Grab jobs from a beanstalk queue and run the teuthology tests they describe as subprocesses. The subprocess invoked is teuthology-supervisor."
12 | )
13 | parser.add_argument(
14 | "-v",
15 | "--verbose",
16 | action="store_true",
17 | help="be more verbose",
18 | )
19 | parser.add_argument(
20 | "-a",
21 | "--archive-dir",
22 | type=str,
23 | help="path to archive results in",
24 | )
25 | parser.add_argument(
26 | "-t",
27 | "--tube",
28 | type=str,
29 | help="which beanstalk tube to read jobs from",
30 | required=True,
31 | )
32 | parser.add_argument(
33 | "-l",
34 | "--log-dir",
35 | type=str,
36 | help="path in which to store the dispatcher log",
37 | required=True,
38 | )
39 | parser.add_argument(
40 | "--exit-on-empty-queue",
41 | action="store_true",
42 | help="if the queue is empty, exit",
43 | )
44 | return parser.parse_args(argv)
45 |
46 |
47 | def main():
48 | if "--supervisor" in sys.argv:
49 | # This is for transitional compatibility, so the old dispatcher can
50 | # invoke the new supervisor. Once old dispatchers are phased out,
51 | # this block can be as well.
52 | sys.argv.remove("--supervisor")
53 | sys.argv[0] = "teuthology-supervisor"
54 | sys.exit(teuthology.dispatcher.supervisor.main(
55 | parse_supervisor_args(sys.argv[1:])
56 | ))
57 | else:
58 | sys.exit(teuthology.dispatcher.main(parse_args(sys.argv[1:])))
59 |
60 |
61 | if __name__ == "__main__":
62 | main()
63 |
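64 | # Example invocation (tube name and paths are illustrative):
65 | #
66 | #   teuthology-dispatcher --tube smithi \
67 | #       --log-dir /var/log/teuthology --archive-dir /srv/teuthology/archive
68 |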
--------------------------------------------------------------------------------
/teuthology/task/args.py:
--------------------------------------------------------------------------------
1 | """
2 | These routines only appear to be used by the peering_speed tests.
3 | """
4 | def gen_args(name, args):
5 | """
6 | Called from argify to generate arguments.
7 | """
8 | usage = [""]
9 | usage += [name + ':']
10 | usage += \
11 | [" {key}: <{usage}> ({default})".format(
12 | key=key, usage=_usage, default=default)
13 | for (key, _usage, default, _) in args]
14 | usage.append('')
15 | usage.append(name + ':')
16 | usage += \
17 | [" {key}: {default}".format(
18 | key = key, default = default)
19 | for (key, _, default, _) in args]
20 | usage = '\n'.join(' ' + i for i in usage)
21 | def ret(config):
22 | """
23 | return an object with attributes set from args.
24 | """
25 | class Object(object):
26 | """
27 | simple object
28 | """
29 | pass
30 | obj = Object()
31 | for (key, usage, default, conv) in args:
32 | if key in config:
33 | setattr(obj, key, conv(config[key]))
34 | else:
35 | setattr(obj, key, conv(default))
36 | return obj
37 | return usage, ret
38 |
39 | def argify(name, args):
40 | """
41 | Object used as a decorator for the peering speed tests.
42 | See peering_speed_test.py
43 | """
44 | (usage, config_func) = gen_args(name, args)
45 | def ret1(f):
46 | """
47 | Wrapper to handle doc and usage information
48 | """
49 | def ret2(**kwargs):
50 | """
51 | Call f (the parameter passed to ret1)
52 | """
53 | config = kwargs.get('config', {})
54 | if config is None:
55 | config = {}
56 | kwargs['config'] = config_func(config)
57 | return f(**kwargs)
58 | ret2.__doc__ = (f.__doc__ or '') + usage
59 | return ret2
60 | return ret1
61 |
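62 | # A minimal sketch of how argify is applied (task and argument names are
63 | # illustrative). Each args entry is a (key, usage, default, conv) tuple, and
64 | # the wrapped task receives a config object with those attributes:
65 | #
66 | #   @argify('demo', [('num_pgs', 'number of pgs', 10, int)])
67 | #   def task(ctx, config):
68 | #       assert config.num_pgs == 10
69 |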
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-42.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - 'zypper rr openSUSE-Leap-Cloud-Tools || :'
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive remove systemd-logger
22 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
23 | lsb-release salt-minion salt-master make gcc gcc-c++
24 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
25 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
26 | - systemctl enable salt-minion.service ntpd.service
27 | - systemctl restart ntpd.service
28 | final_message: "{up}, after $UPTIME seconds"
29 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-15.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
6 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
7 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
8 | preserve_hostname: true
9 | users:
10 | - name: {username}
11 | gecos: User
12 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
13 | groups: users
14 | runcmd:
15 | - |
16 | for i in $(seq 1 30) ; do
17 | ping -q -c 1 8.8.8.8 && break
18 | sleep 10
19 | done
20 | ETH=$(ip route list | grep "scope link" | cut -f 3 -d ' ')
21 | sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-$ETH
22 | (
23 | curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname |
24 | sed -e 's/[\.-].*//'
25 | eval printf "%03d%03d%03d%03d.{lab_domain}" $(
26 | curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 |
27 | tr . ' ' )
28 | ) | tee /etc/hostname
29 | hostname $(cat /etc/hostname)
30 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
31 | - zypper --non-interactive --no-gpg-checks refresh
32 | - zypper --non-interactive install --no-recommends wget rsyslog lsb-release make gcc gcc-c++ chrony
33 | - sed -i -e 's/^! pool/pool/' /etc/chrony.conf
34 | - systemctl enable chronyd.service
35 | - systemctl start chronyd.service
36 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
37 | final_message: "{up}, after $UPTIME seconds"
38 |
--------------------------------------------------------------------------------
/teuthology/util/test/test_time.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from datetime import datetime, timedelta, timezone
4 | from typing import Type
5 |
6 | from teuthology.util import time
7 |
8 |
9 | @pytest.mark.parametrize(
10 | ["timestamp", "result"],
11 | [
12 | ["1999-12-31_23:59:59", datetime(1999, 12, 31, 23, 59, 59, tzinfo=timezone.utc)],
13 | ["1999-12-31_23:59", datetime(1999, 12, 31, 23, 59, 0, tzinfo=timezone.utc)],
14 | ["1999-12-31T23:59:59", datetime(1999, 12, 31, 23, 59, 59, tzinfo=timezone.utc)],
15 | ["1999-12-31T23:59:59+00:00", datetime(1999, 12, 31, 23, 59, 59, tzinfo=timezone.utc)],
16 | ["1999-12-31T17:59:59-06:00", datetime(1999, 12, 31, 23, 59, 59, tzinfo=timezone.utc)],
17 | ["2024-01-01", datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)],
18 | ["tomorrow", ValueError],
19 | ["1d", ValueError],
20 | ["", ValueError],
21 | ["2024", ValueError],
22 |
23 | ]
24 | )
25 | def test_parse_timestamp(timestamp: str, result: datetime | Type[Exception]):
26 | if isinstance(result, datetime):
27 | assert time.parse_timestamp(timestamp) == result
28 | else:
29 | with pytest.raises(result):
30 | time.parse_timestamp(timestamp)
31 |
32 |
33 | @pytest.mark.parametrize(
34 | ["offset", "result"],
35 | [
36 | ["1s", timedelta(seconds=1)],
37 | ["1m", timedelta(minutes=1)],
38 | ["1h", timedelta(hours=1)],
39 | ["1d", timedelta(days=1)],
40 | ["1w", timedelta(weeks=1)],
41 | ["365d", timedelta(days=365)],
42 | ["1x", ValueError],
43 | ["-1m", ValueError],
44 | ["0xde", ValueError],
45 | ["frog", ValueError],
46 | ["7dwarfs", ValueError],
47 | ]
48 | )
49 | def test_parse_offset(offset: str, result: timedelta | Type[Exception]):
50 | if isinstance(result, timedelta):
51 | assert time.parse_offset(offset) == result
52 | else:
53 | with pytest.raises(result):
54 | time.parse_offset(offset)
55 |
--------------------------------------------------------------------------------
/teuthology/task/internal/vm_setup.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import subprocess
4 |
5 | from teuthology.parallel import parallel
6 | from teuthology.task import ansible
7 | from teuthology.exceptions import CommandFailedError
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def vm_setup(ctx, config):
13 | """
14 | Look for virtual machines and handle their initialization
15 | """
16 | all_tasks = [list(x.keys())[0] for x in ctx.config['tasks']]
17 | need_ansible = False
18 | if 'kernel' in all_tasks and 'ansible.cephlab' not in all_tasks:
19 | need_ansible = True
20 | ansible_hosts = set()
21 | with parallel():
22 | editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
23 | for rem in ctx.cluster.remotes.keys():
24 | if rem.is_vm:
25 | ansible_hosts.add(rem.shortname)
26 | try:
27 | rem.sh('test -e /ceph-qa-ready')
28 | except CommandFailedError:
29 | p1 = subprocess.Popen(['cat', editinfo],
30 | stdout=subprocess.PIPE)
31 | p2 = subprocess.Popen(
32 | [
33 | 'ssh',
34 | '-o', 'StrictHostKeyChecking=no',
35 | '-t', '-t',
36 | str(rem),
37 | 'sudo',
38 | 'sh'
39 | ],
40 | stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE
41 | )
42 | _, err = p2.communicate()
43 | if err:
44 | log.error("Edit of /etc/sudoers failed: %s", err)
45 | if need_ansible and ansible_hosts:
46 | log.info("Running ansible on %s", list(ansible_hosts))
47 | ansible_config = dict(
48 | hosts=list(ansible_hosts),
49 | )
50 | with ansible.CephLab(ctx, config=ansible_config):
51 | pass
52 |
--------------------------------------------------------------------------------
/docs/exporter.rst:
--------------------------------------------------------------------------------
1 | .. _exporter:
2 |
3 | ==================================
4 | The Teuthology Prometheus Exporter
5 | ==================================
6 |
7 | To make it easier to determine the status of the lab, we've created a
8 | `Prometheus <https://prometheus.io/>`__ exporter (helpfully named
9 | ``teuthology-exporter``). We use `Grafana <https://grafana.com/>`__ to
10 | visualize the data we collect.
11 |
12 | It listens on port 61764, and scrapes every 60 seconds by default.
13 |
14 |
15 | Exposed Metrics
16 | ===============
17 |
18 | .. list-table::
19 |
20 | * - Name
21 | - Type
22 | - Description
23 | - Labels
24 | * - beanstalk_queue_length
25 | - Gauge
26 | - The number of jobs in the beanstalkd queue
27 | - machine type
28 | * - beanstalk_queue_paused
29 | - Gauge
30 | - Whether or not the beanstalkd queue is paused
31 | - machine type
32 | * - teuthology_dispatchers
33 | - Gauge
34 | - The number of running teuthology-dispatcher instances
35 | - machine type
36 | * - teuthology_job_processes
37 | - Gauge
38 | - The number of running job *processes*
39 | -
40 | * - teuthology_job_results_total
41 | - Gauge
42 | - The number of completed jobs
43 | - status (pass/fail/dead)
44 | * - teuthology_nodes
45 | - Gauge
46 | - The number of test nodes
47 | - up, locked
48 | * - teuthology_job_duration_seconds
49 | - Summary
50 | - The time it took to run a job
51 | - suite
52 | * - teuthology_task_duration_seconds
53 | - Summary
54 | - The time it took for each phase of each task to run
55 | - name, phase (enter/exit)
56 | * - teuthology_bootstrap_duration_seconds
57 | - Summary
58 | - The time it took to run teuthology's bootstrap script
59 | -
60 | * - teuthology_node_locking_duration_seconds
61 | - Summary
62 | - The time it took to lock nodes
63 | - machine type, count
64 | * - teuthology_node_reimaging_duration_seconds
65 | - Summary
66 | - The time it took to reimage nodes
67 | - machine type, count
68 |
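69 | Scrape Configuration
70 | ====================
71 |
72 | A minimal Prometheus scrape config for this exporter might look like the
73 | following sketch (the target hostname is a placeholder)::
74 |
75 |     scrape_configs:
76 |       - job_name: teuthology
77 |         scrape_interval: 60s
78 |         static_configs:
79 |           - targets: ['teuthology.example.com:61764']
80 |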
--------------------------------------------------------------------------------
/teuthology/test/test_kill.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | from teuthology.kill import find_targets
4 |
5 |
6 | class TestFindTargets(object):
7 | """ Tests for teuthology.kill.find_targets """
8 |
9 | @patch('teuthology.kill.report.ResultsReporter.get_jobs')
10 | def test_missing_run_find_targets(self, m_get_jobs):
11 | m_get_jobs.return_value = []
12 | run_targets = find_targets("run-name")
13 | assert run_targets == {}
14 |
15 | @patch('teuthology.kill.report.ResultsReporter.get_jobs')
16 | def test_missing_job_find_targets(self, m_get_jobs):
17 | m_get_jobs.return_value = {}
18 | job_targets = find_targets("run-name", "3")
19 | assert job_targets == {}
20 |
21 | @patch('teuthology.kill.report.ResultsReporter.get_jobs')
22 | def test_missing_run_targets_find_targets(self, m_get_jobs):
23 | m_get_jobs.return_value = [{"targets": None, "status": "waiting"}]
24 | run_targets = find_targets("run-name")
25 | assert run_targets == {}
26 |
27 | @patch('teuthology.kill.report.ResultsReporter.get_jobs')
28 | def test_missing_job_targets_find_targets(self, m_get_jobs):
29 | m_get_jobs.return_value = {"targets": None}
30 | job_targets = find_targets("run-name", "3")
31 | assert job_targets == {}
32 |
33 | @patch('teuthology.kill.report.ResultsReporter.get_jobs')
34 | def test_run_find_targets(self, m_get_jobs):
35 | m_get_jobs.return_value = [{"targets": {"node1": ""}, "status": "running"}]
36 | run_targets = find_targets("run-name")
37 | assert run_targets == {"node1": ""}
38 | m_get_jobs.return_value = [{"targets": {"node1": ""}}]
39 | run_targets = find_targets("run-name")
40 | assert run_targets == {}
41 |
42 | @patch('teuthology.kill.report.ResultsReporter.get_jobs')
43 | def test_job_find_targets(self, m_get_jobs):
44 | m_get_jobs.return_value = {"targets": {"node1": ""}}
45 | job_targets = find_targets("run-name", "3")
46 | assert job_targets == {"node1": ""}
47 |
--------------------------------------------------------------------------------
/teuthology/test/test_job_status.py:
--------------------------------------------------------------------------------
1 | from teuthology import job_status
2 |
3 |
4 | class TestJobStatus(object):
5 | def test_get_only_success_true(self):
6 | summary = dict(success=True)
7 | status = job_status.get_status(summary)
8 | assert status == 'pass'
9 |
10 | def test_get_only_success_false(self):
11 | summary = dict(success=False)
12 | status = job_status.get_status(summary)
13 | assert status == 'fail'
14 |
15 | def test_get_status_pass(self):
16 | summary = dict(status='pass')
17 | status = job_status.get_status(summary)
18 | assert status == 'pass'
19 |
20 | def test_get_status_fail(self):
21 | summary = dict(status='fail')
22 | status = job_status.get_status(summary)
23 | assert status == 'fail'
24 |
25 | def test_get_status_dead(self):
26 | summary = dict(status='dead')
27 | status = job_status.get_status(summary)
28 | assert status == 'dead'
29 |
30 | def test_get_status_none(self):
31 | summary = dict()
32 | status = job_status.get_status(summary)
33 | assert status is None
34 |
35 | def test_set_status_pass(self):
36 | summary = dict()
37 | job_status.set_status(summary, 'pass')
38 | assert summary == dict(status='pass', success=True)
39 |
40 | def test_set_status_dead(self):
41 | summary = dict()
42 | job_status.set_status(summary, 'dead')
43 | assert summary == dict(status='dead', success=False)
44 |
45 | def test_set_then_get_status_dead(self):
46 | summary = dict()
47 | job_status.set_status(summary, 'dead')
48 | status = job_status.get_status(summary)
49 | assert status == 'dead'
50 |
51 | def test_set_status_none(self):
52 | summary = dict()
53 | job_status.set_status(summary, None)
54 | assert summary == dict()
55 |
56 | def test_legacy_fail(self):
57 | summary = dict(success=True)
58 | summary['success'] = False
59 | status = job_status.get_status(summary)
60 | assert status == 'fail'
61 |
--------------------------------------------------------------------------------
/teuthology/reimage.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 |
4 | import teuthology
5 |
6 | from teuthology.parallel import parallel
7 | from teuthology.provision import reimage, get_reimage_types
8 | from teuthology.lock import query, ops
9 | from teuthology.misc import get_user
10 | from teuthology.misc import decanonicalize_hostname as shortname
11 |
12 | log = logging.getLogger(__name__)
13 |
14 | def main(args):
15 | if args['--verbose']:
16 | teuthology.log.setLevel(logging.DEBUG)
17 |
18 | ctx = argparse.Namespace()
19 | ctx.os_type = args['--os-type']
20 | ctx.os_version = args['--os-version']
21 |
22 | nodes = args['<nodes>']
23 |
24 | reimage_types = get_reimage_types()
25 | statuses = query.get_statuses(nodes)
26 | owner = args['--owner'] or get_user()
27 | unlocked = [shortname(_['name'])
28 | for _ in statuses if not _['locked']]
29 | if unlocked:
30 | log.error(
31 | "Some of the nodes are not locked: %s", unlocked)
32 | exit(1)
33 |
34 | improper = [shortname(_['name']) for _ in statuses if _['locked_by'] != owner]
35 | if improper:
36 | log.error(
37 | "Some of the nodes are not owned by '%s': %s", owner, improper)
38 | exit(1)
39 |
40 | irreimageable = [shortname(_['name']) for _ in statuses
41 | if _['machine_type'] not in reimage_types]
42 | if irreimageable:
43 | log.error(
44 | "Following nodes cannot be reimaged because theirs machine type "
45 | "is not reimageable: %s", irreimageable)
46 | exit(1)
47 |
48 | def reimage_node(ctx, machine_name, machine_type):
49 | ops.update_nodes([machine_name], True)
50 | reimage(ctx, machine_name, machine_type)
51 | ops.update_nodes([machine_name])
52 | log.debug("Node '%s' reimaging is complete", machine_name)
53 |
54 | with parallel() as p:
55 | for node in statuses:
56 | log.debug("Start node '%s' reimaging", node['name'])
57 | p.spawn(reimage_node, ctx, shortname(node['name']), node['machine_type'])
58 |
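59 | # Example invocation via the teuthology-reimage entry point (node names are
60 | # illustrative); the nodes must already be locked by the calling owner:
61 | #
62 | #   teuthology-reimage --os-type ubuntu --os-version 22.04 smithi001 smithi002
63 |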
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = docs, py3, flake8
3 | isolated_build = True
4 |
5 | [testenv]
6 | setenv =
7 | LC_ALL=en_US.UTF-8
8 | LANG=en_US
9 |
10 | [testenv:py3]
11 | install_command = pip install --upgrade {opts} {packages}
12 | passenv = HOME
13 | deps=
14 | -r{toxinidir}/requirements.txt
15 | pytest-cov
16 | coverage
17 | mock
18 | extras = test
19 | log_format = %(asctime)s %(levelname)s %(message)s
20 | commands=
21 | python -m pytest --cov=teuthology --cov-report=term -v {posargs:teuthology scripts}
22 |
23 | [testenv:flake8]
24 | install_command = pip install --upgrade {opts} {packages}
25 | deps=
26 | flake8
27 | commands=flake8 --select=F,E9 {posargs:teuthology scripts}
28 |
29 | [testenv:docs]
30 | install_command = pip install --upgrade {opts} {packages}
31 | changedir=docs
32 | deps=
33 | -r{toxinidir}/requirements.txt
34 | sphinx != 7.2.0, != 7.2.1, != 7.2.2
35 | sphinxcontrib-programoutput
36 | commands=
37 | sphinx-apidoc -f -o . ../teuthology ../teuthology/test ../teuthology/orchestra/test ../teuthology/task/test
38 | sphinx-build -b html -d {envtmpdir}/doctrees . {envtmpdir}/html
39 |
40 | [testenv:openstack]
41 | install_command = pip install --upgrade {opts} {packages}
42 | passenv =
43 | HOME
44 | OS_REGION_NAME
45 | OS_AUTH_URL
46 | OS_TENANT_ID
47 | OS_TENANT_NAME
48 | OS_PASSWORD
49 | OS_USERNAME
50 | deps=
51 | -r{toxinidir}/requirements.txt
52 | extras = test
53 | commands=py.test -v {posargs:teuthology/openstack/test/test_openstack.py}
54 |
55 | [testenv:openstack-integration]
56 | passenv =
57 | HOME
58 | OS_REGION_NAME
59 | OS_AUTH_URL
60 | OS_TENANT_ID
61 | OS_TENANT_NAME
62 | OS_PASSWORD
63 | OS_USERNAME
64 | deps=
65 | -r{toxinidir}/requirements.txt
66 | extras = test
67 | commands=
68 | py.test -v {posargs} teuthology/openstack/test/openstack-integration.py
69 |
70 | [testenv:openstack-delegate]
71 | passenv =
72 | HOME
73 | OS_REGION_NAME
74 | OS_AUTH_URL
75 | OS_TENANT_ID
76 | OS_TENANT_NAME
77 | OS_PASSWORD
78 | OS_USERNAME
79 | sitepackages=True
80 | deps=
81 | -r{toxinidir}/requirements.txt
82 | commands={toxinidir}/openstack-delegate.sh
83 |
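84 | # Example usage (the test path is illustrative):
85 | #   tox -e py3 -- teuthology/test/test_ls.py
86 | #   tox -e flake8
87 |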
--------------------------------------------------------------------------------
/docs/_static/create_nodes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # A sample script that can be used while setting up a new teuthology lab
3 | # This script will connect to the machines in your lab, and populate a
4 | # paddles instance with their information.
5 | #
6 | # You WILL need to modify it.
7 |
8 | import logging
9 | import sys
10 | from teuthology.orchestra.remote import Remote
11 | from teuthology.lock.ops import update_inventory
12 |
13 | paddles_url = 'http://paddles.example.com/nodes/'
14 |
15 | machine_type = 'typica'
16 | lab_domain = 'example.com'
17 | # Don't change the user. It won't work at this time.
18 | user = 'ubuntu'
19 | # We are populating 'typica003' -> 'typica192'
20 | machine_index_range = range(3, 193)  # range() excludes the end value
21 |
22 | log = logging.getLogger(sys.argv[0])
23 | logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
24 | logging.WARNING)
25 |
26 |
27 | def get_shortname(machine_type, index):
28 | """
29 | Given a number, return a hostname. Example:
30 | get_shortname('magna', 3) = 'magna003'
31 |
32 | Modify to suit your needs.
33 | """
34 | return machine_type + str(index).rjust(3, '0')
35 |
36 |
37 | def get_info(user, fqdn):
38 | remote = Remote('@'.join((user, fqdn)))
39 | return remote.inventory_info
40 |
41 |
42 | def main():
43 | shortnames = [get_shortname(machine_type, i) for i in machine_index_range]
44 | fqdns = ['.'.join((name, lab_domain)) for name in shortnames]
45 | for fqdn in fqdns:
46 | log.info("Creating %s", fqdn)
47 | base_info = dict(
48 | name=fqdn,
49 | locked=True,
50 | locked_by='initial@setup',
51 | machine_type=machine_type,
52 | description="Initial node creation",
53 | )
54 | try:
55 | info = get_info(user, fqdn)
56 | base_info.update(info)
57 | base_info['up'] = True
58 | except Exception as exc:
59 | log.error("{fqdn} is down".format(fqdn=fqdn))
60 | base_info['up'] = False
61 | base_info['description'] = repr(exc)
62 | update_inventory(base_info)
63 |
64 | if __name__ == '__main__':
65 | main()
66 |
--------------------------------------------------------------------------------
/teuthology/task/background_exec.py:
--------------------------------------------------------------------------------
1 | """
2 | Background task
3 | """
4 |
5 | import contextlib
6 | import logging
7 |
8 | from teuthology import misc
9 | from teuthology.orchestra import run
10 |
11 | log = logging.getLogger(__name__)
12 |
13 |
14 | @contextlib.contextmanager
15 | def task(ctx, config):
16 | """
17 | Run a background task.
18 |
19 | Run the given command on a client, similar to exec. However, when
20 | we hit the finally because the subsequent task is ready to exit, kill
21 | the child process.
22 |
23 | We do not do any error code checking here since we are forcefully killing
24 | off the child when we are done.
25 |
26 | If the command is a list, we simply join it with ';'s.
27 |
28 | Example::
29 |
30 | tasks:
31 | - install:
32 | - background_exec:
33 | client.0: while true ; do date ; sleep 1 ; done
34 | client.1:
35 | - while true
36 | - do id
37 | - sleep 1
38 | - done
39 | - exec:
40 | client.0:
41 | - sleep 10
42 |
43 | """
44 | assert isinstance(config, dict), "task background got invalid config"
45 |
46 | testdir = misc.get_testdir(ctx)
47 |
48 | tasks = {}
49 | for role, cmd in config.items():
50 | (remote,) = ctx.cluster.only(role).remotes.keys()
51 | log.info('Running background command on role %s host %s', role,
52 | remote.name)
53 | if isinstance(cmd, list):
54 | cmd = '; '.join(cmd)
55 | cmd = cmd.replace('$TESTDIR', testdir)
56 | tasks[remote.name] = remote.run(
57 | args=[
58 | 'sudo',
59 | 'TESTDIR=%s' % testdir,
60 | 'daemon-helper', 'kill', '--kill-group',
61 | 'bash', '-c', cmd,
62 | ],
63 | wait=False,
64 | stdin=run.PIPE,
65 | check_status=False,
66 | logger=log.getChild(remote.name)
67 | )
68 |
69 | try:
70 | yield
71 |
72 | finally:
73 | for name, task in tasks.items():
74 | log.info('Stopping background command on %s', name)
75 | task.stdin.close()
76 | run.wait(tasks.values())
77 |
--------------------------------------------------------------------------------
/teuthology/ls.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 | import yaml
5 | import errno
6 | import re
7 |
8 | from teuthology.job_status import get_status
9 |
10 |
11 | def main(args):
12 | return ls(args["<archive_dir>"], args["--verbose"])
13 |
14 |
15 | def ls(archive_dir, verbose):
16 | for j in get_jobs(archive_dir):
17 | job_dir = os.path.join(archive_dir, j)
18 | summary = {}
19 | try:
20 | with open(os.path.join(job_dir, 'summary.yaml')) as f:
21 | g = yaml.safe_load_all(f)
22 | for new in g:
23 | summary.update(new)
24 | except IOError as e:
25 | if e.errno == errno.ENOENT:
26 | print_debug_info(j, job_dir, archive_dir)
27 | continue
28 | else:
29 | raise
30 |
31 | print("{job} {status} {owner} {desc} {duration}s".format(
32 | job=j,
33 | owner=summary.get('owner', '-'),
34 | desc=summary.get('description', '-'),
35 | status=get_status(summary),
36 | duration=int(summary.get('duration', 0)),
37 | ))
38 | if verbose and 'failure_reason' in summary:
39 | print(' {reason}'.format(reason=summary['failure_reason']))
40 |
41 |
42 | def get_jobs(archive_dir):
43 | dir_contents = os.listdir(archive_dir)
44 |
45 | def is_job_dir(parent, subdir):
46 | if (os.path.isdir(os.path.join(parent, subdir)) and re.match(r'\d+$',
47 | subdir)):
48 | return True
49 | return False
50 |
51 | jobs = [job for job in dir_contents if is_job_dir(archive_dir, job)]
52 | return sorted(jobs)
53 |
54 |
55 | def print_debug_info(job, job_dir, archive_dir):
56 | print('%s ' % job, end='')
57 |
58 | try:
59 | log_path = os.path.join(archive_dir, job, 'teuthology.log')
60 | if os.path.exists(log_path):
61 | tail = os.popen(
62 | 'tail -1 %s' % log_path
63 | ).read().rstrip()
64 | print(tail, end='')
65 | else:
66 | print('', end='')
67 | except IOError:
68 | pass
69 | print('')
70 |
--------------------------------------------------------------------------------
/teuthology/exit.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import signal
4 |
5 |
6 | log = logging.getLogger(__name__)
7 |
8 |
9 | class Exiter(object):
10 | """
11 | A helper to manage any signal handlers we need to call upon receiving a
12 | given signal
13 | """
14 | def __init__(self):
15 | self.handlers = list()
16 |
17 | def add_handler(self, signals, func):
18 | """
19 | Adds a handler function to be called when any of the given signals are
20 | received.
21 |
22 | The handler function should have a signature like::
23 |
24 | my_handler(signal, frame)
25 | """
26 | if isinstance(signals, int):
27 | signals = [signals]
28 |
29 | for signal_ in signals:
30 | signal.signal(signal_, self.default_handler)
31 |
32 | handler = Handler(self, func, signals)
33 | log.debug(
34 | "Installing handler: %s",
35 | repr(handler),
36 | )
37 | self.handlers.append(handler)
38 | return handler
39 |
40 | def default_handler(self, signal_, frame):
41 | log.debug(
42 | "Got signal %s; running %s handler%s...",
43 | signal_,
44 | len(self.handlers),
45 | '' if len(self.handlers) == 1 else 's',
46 | )
47 | for handler in self.handlers:
48 | handler.func(signal_, frame)
49 | log.debug("Finished running handlers")
50 | # Restore the default handler
51 | signal.signal(signal_, signal.SIG_DFL)
52 | # Re-send the signal to our main process
53 | os.kill(os.getpid(), signal_)
54 |
55 |
56 | class Handler(object):
57 | def __init__(self, exiter, func, signals):
58 | self.exiter = exiter
59 | self.func = func
60 | self.signals = signals
61 |
62 | def remove(self):
63 | try:
64 | log.debug("Removing handler: %s", self)
65 | self.exiter.handlers.remove(self)
66 | except ValueError:
67 | pass
68 |
69 | def __repr__(self):
70 | return "{c}(exiter={e}, func={f}, signals={s})".format(
71 | c=self.__class__.__name__,
72 | e=self.exiter,
73 | f=self.func,
74 | s=self.signals,
75 | )
76 |
77 |
78 | exiter = Exiter()
79 |
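80 | # A minimal usage sketch (the handler function is illustrative):
81 | #
82 | #   import signal
83 | #   from teuthology.exit import exiter
84 | #
85 | #   def my_cleanup(signal_, frame):
86 | #       log.info("cleaning up before exit")
87 | #
88 | #   handler = exiter.add_handler([signal.SIGTERM, signal.SIGINT], my_cleanup)
89 | #   # Later, if the cleanup is no longer needed:
90 | #   handler.remove()
91 |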
--------------------------------------------------------------------------------
/teuthology/task/parallel.py:
--------------------------------------------------------------------------------
1 | """
2 | Task to group parallel running tasks
3 | """
4 | import sys
5 | import logging
6 |
7 | from teuthology import run_tasks
8 | from teuthology import parallel
9 |
10 | log = logging.getLogger(__name__)
11 |
12 |
13 | def task(ctx, config):
14 | """
15 | Run a group of tasks in parallel.
16 |
17 | example::
18 |
19 | - parallel:
20 | - tasktest:
21 | - tasktest:
22 |
23 | You can also define tasks in a top-level section outside of
24 | 'tasks:', and reference them here.
25 |
26 | The referenced section must contain a list of tasks to run
27 | sequentially, or a single task as a dict. The latter is only
28 | available for backwards compatibility with existing suites::
29 |
30 | tasks:
31 | - parallel:
32 | - tasktest: # task inline
33 | - foo # reference to top-level 'foo' section
34 | - bar # reference to top-level 'bar' section
35 | foo:
36 | - tasktest1:
37 | - tasktest2:
38 | bar:
39 | tasktest: # note the list syntax from 'foo' is preferred
40 |
41 | That is, if the entry is not a dict, we will look it up in the top-level
42 | config.
43 |
44 | Sequential tasks and Parallel tasks can be nested.
45 | """
46 |
47 | log.info('starting parallel...')
48 | with parallel.parallel() as p:
49 | for entry in config:
50 | if not isinstance(entry, dict):
51 | entry = ctx.config.get(entry, {})
52 | # support the usual list syntax for tasks
53 | if isinstance(entry, list):
54 | entry = dict(sequential=entry)
55 | ((taskname, confg),) = entry.items()
56 | p.spawn(_run_spawned, ctx, confg, taskname)
57 |
58 |
59 | def _run_spawned(ctx, config, taskname):
60 | """Run one of the tasks (this runs in parallel with others)"""
61 | mgr = {}
62 | try:
63 | log.info('In parallel, running task %s...' % taskname)
64 | mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=config)
65 | if hasattr(mgr, '__enter__'):
66 | mgr.__enter__()
67 | finally:
68 | exc_info = sys.exc_info()
69 | if hasattr(mgr, '__exit__'):
70 | mgr.__exit__(*exc_info)
71 | del exc_info
72 |
--------------------------------------------------------------------------------
/teuthology/suite/test/test_placeholder.py:
--------------------------------------------------------------------------------
1 | from teuthology.suite.placeholder import (
2 | substitute_placeholders, dict_templ, Placeholder
3 | )
4 |
5 |
6 | class TestPlaceholder(object):
7 | def test_substitute_placeholders(self):
8 | suite_hash = 'suite_hash'
9 | input_dict = dict(
10 | suite='suite',
11 | suite_branch='suite_branch',
12 | suite_hash=suite_hash,
13 | ceph_branch='ceph_branch',
14 | ceph_hash='ceph_hash',
15 | teuthology_branch='teuthology_branch',
16 | teuthology_sha1='teuthology_sha1',
17 | machine_type='machine_type',
18 | distro='distro',
19 | distro_version='distro_version',
20 | archive_upload='archive_upload',
21 | archive_upload_key='archive_upload_key',
22 | suite_repo='https://example.com/ceph/suite.git',
23 | suite_relpath='',
24 | ceph_repo='https://example.com/ceph/ceph.git',
25 | flavor='default',
26 | expire='expire',
27 | )
28 | output_dict = substitute_placeholders(dict_templ, input_dict)
29 | assert output_dict['suite'] == 'suite'
30 | assert output_dict['suite_sha1'] == suite_hash
31 | assert isinstance(dict_templ['suite'], Placeholder)
32 | assert isinstance(
33 | dict_templ['overrides']['admin_socket']['branch'],
34 | Placeholder)
35 |
36 | def test_null_placeholders_dropped(self):
37 | input_dict = dict(
38 | suite='suite',
39 | suite_branch='suite_branch',
40 | suite_hash='suite_hash',
41 | ceph_branch='ceph_branch',
42 | ceph_hash='ceph_hash',
43 | teuthology_branch='teuthology_branch',
44 | teuthology_sha1='teuthology_sha1',
45 | machine_type='machine_type',
46 | archive_upload='archive_upload',
47 | archive_upload_key='archive_upload_key',
48 | distro=None,
49 | distro_version=None,
50 | suite_repo='https://example.com/ceph/suite.git',
51 | suite_relpath='',
52 | ceph_repo='https://example.com/ceph/ceph.git',
53 | flavor=None,
54 | expire='expire',
55 | )
56 | output_dict = substitute_placeholders(dict_templ, input_dict)
57 | assert 'os_type' not in output_dict
58 |
--------------------------------------------------------------------------------
/teuthology/test/task/test_internal.py:
--------------------------------------------------------------------------------
1 | from teuthology.config import FakeNamespace
2 | from teuthology.task import internal
3 |
4 |
5 | class TestInternal(object):
6 | def setup_method(self):
7 | self.ctx = FakeNamespace()
8 | self.ctx.config = dict()
9 |
10 | def test_buildpackages_prep(self):
11 | #
12 | # no buildpackages nor install tasks
13 | #
14 | self.ctx.config = { 'tasks': [] }
15 | assert internal.buildpackages_prep(self.ctx,
16 | self.ctx.config) == internal.BUILDPACKAGES_NOTHING
17 | #
18 | # make the buildpackages tasks the first to run
19 | #
20 | self.ctx.config = {
21 | 'tasks': [ { 'atask': None },
22 | { 'internal.buildpackages_prep': None },
23 | { 'btask': None },
24 | { 'install': None },
25 | { 'buildpackages': None } ],
26 | }
27 | assert internal.buildpackages_prep(self.ctx,
28 | self.ctx.config) == internal.BUILDPACKAGES_FIRST
29 | assert self.ctx.config == {
30 | 'tasks': [ { 'atask': None },
31 | { 'internal.buildpackages_prep': None },
32 | { 'buildpackages': None },
33 | { 'btask': None },
34 | { 'install': None } ],
35 | }
36 | #
37 | # the buildpackages task already the first task to run
38 | #
39 | assert internal.buildpackages_prep(self.ctx,
40 | self.ctx.config) == internal.BUILDPACKAGES_OK
41 | #
42 | # no buildpackages task
43 | #
44 | self.ctx.config = {
45 | 'tasks': [ { 'install': None } ],
46 | }
47 | assert internal.buildpackages_prep(self.ctx,
48 | self.ctx.config) == internal.BUILDPACKAGES_NOTHING
49 | #
50 | # no install task: the buildpackages task must be removed
51 | #
52 | self.ctx.config = {
53 | 'tasks': [ { 'buildpackages': None } ],
54 | }
55 | assert internal.buildpackages_prep(self.ctx,
56 | self.ctx.config) == internal.BUILDPACKAGES_REMOVED
57 | assert self.ctx.config == {'tasks': []}
58 |
--------------------------------------------------------------------------------
/scripts/node_cleanup.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import sys
4 |
5 | import teuthology
6 | from teuthology.config import config
7 | from teuthology.lock import query, ops
8 |
9 |
10 | def main():
11 | args = parse_args(sys.argv[1:])
12 | if args.verbose:
13 | teuthology.log.setLevel(logging.DEBUG)
14 | else:
15 | teuthology.log.setLevel(100)
16 | log = logging.getLogger(__name__)
17 | logger = logging.getLogger()
18 | for handler in logger.handlers:
19 | handler.setFormatter(
20 | logging.Formatter('%(message)s')
21 | )
22 | try:
23 | stale = query.find_stale_locks(args.owner)
24 | except Exception:
25 | log.exception(f"Error while checking for stale locks held by {args.owner}")
26 | return
27 | if not stale:
28 | return
29 | by_owner = {}
30 | for node in stale:
31 | if args.owner and node['locked_by'] != args.owner:
32 | log.warning(
33 | f"Node {node['name']} expected to be locked by {args.owner} "
34 | f"but found {node['locked_by']} instead"
35 | )
36 | continue
37 | by_owner.setdefault(node['locked_by'], []).append(node)
38 | if args.dry_run:
39 | log.info("Would attempt to unlock:")
40 | for owner, nodes in by_owner.items():
41 | for node in nodes:
42 | node_job = node['description'].replace(
43 | config.archive_base, config.results_ui_server)
44 | log.info(f"{node['name']}\t{node_job}")
45 | else:
46 | for owner, nodes in by_owner.items():
47 | ops.unlock_safe([node["name"] for node in nodes], owner)
48 | log.info(f"unlocked {len(stale)} nodes")
49 |
50 | def parse_args(argv):
51 | parser = argparse.ArgumentParser(
52 | description="Find and unlock nodes that are still locked by jobs that are no "
53 | "longer active",
54 | )
55 | parser.add_argument(
56 | '-v', '--verbose',
57 | action='store_true',
58 | default=False,
59 | help='Be more verbose',
60 | )
61 | parser.add_argument(
62 | '--dry-run',
63 | action='store_true',
64 | default=False,
65 | help="List nodes that would be unlocked if the flag were omitted",
66 | )
67 | parser.add_argument(
68 | '--owner',
69 | help='Optionally, find nodes locked by a specific user',
70 | )
71 | return parser.parse_args(argv)
72 |
73 | if __name__ == "__main__":
74 | main()
75 |
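76 | # Example invocations, assuming this module is exposed as the
77 | # teuthology-node-cleanup console script (the owner value is illustrative):
78 | #
79 | #   teuthology-node-cleanup --dry-run
80 | #   teuthology-node-cleanup -v --owner user@example.com
81 |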
--------------------------------------------------------------------------------
/docs/docker-compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | postgres:
5 | image: postgres:14
6 | healthcheck:
7 | test: [ "CMD", "pg_isready", "-q", "-d", "paddles", "-U", "admin" ]
8 | timeout: 5s
9 | interval: 10s
10 | retries: 2
11 | environment:
12 | - POSTGRES_USER=root
13 | - POSTGRES_PASSWORD=password
14 | - APP_DB_USER=admin
15 | - APP_DB_PASS=password
16 | - APP_DB_NAME=paddles
17 | volumes:
18 | - ./db:/docker-entrypoint-initdb.d/
19 | ports:
20 | - 5432:5432
21 | paddles:
22 | image: quay.io/ceph-infra/paddles
23 | environment:
24 | PADDLES_SERVER_HOST: 0.0.0.0
25 | PADDLES_SQLALCHEMY_URL: postgresql+psycopg2://admin:password@postgres:5432/paddles
26 | depends_on:
27 | postgres:
28 | condition: service_healthy
29 | links:
30 | - postgres
31 | healthcheck:
32 | test: ["CMD", "curl", "-f", "http://0.0.0.0:8080"]
33 | timeout: 5s
34 | interval: 30s
35 | retries: 2
36 | ports:
37 | - 8080:8080
38 | pulpito:
39 | image: quay.io/ceph-infra/pulpito
40 | environment:
41 | PULPITO_PADDLES_ADDRESS: http://paddles:8080
42 | depends_on:
43 | paddles:
44 | condition: service_healthy
45 | links:
46 | - paddles
47 | healthcheck:
48 | test: ["CMD", "curl", "-f", "http://0.0.0.0:8081"]
49 | timeout: 5s
50 | interval: 10s
51 | retries: 2
52 | ports:
53 | - 8081:8081
54 | beanstalk:
55 | build: ../../beanstalk/alpine
56 | ports:
57 | - "11300:11300"
58 | teuthology:
59 | build:
60 | context: ../../
61 | dockerfile: ./docs/docker-compose/teuthology/Dockerfile
62 | args:
63 | SSH_PRIVKEY_FILE: $SSH_PRIVKEY_FILE
64 | depends_on:
65 | paddles:
66 | condition: service_healthy
67 | links:
68 | - paddles
69 | - beanstalk
70 | environment:
71 | SSH_PRIVKEY:
72 | SSH_PRIVKEY_FILE:
73 | MACHINE_TYPE:
74 | TESTNODES:
75 | TEUTHOLOGY_WAIT:
76 | TEUTH_BRANCH:
77 | volumes:
78 | - /tmp/archive_dir:/archive_dir:rw
79 | testnode:
80 | build:
81 | context: ./testnode
82 | dockerfile: ./Dockerfile
83 | deploy:
84 | replicas: 3
85 | depends_on:
86 | paddles:
87 | condition: service_healthy
88 | links:
89 | - paddles
90 | ports:
91 | - "22"
92 | environment:
93 | SSH_PUBKEY:
94 | platform: linux/amd64
95 |
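96 | # This stack is typically driven by start.sh in this directory, which
97 | # supplies the SSH_* and TEUTHOLOGY_* variables referenced above, e.g.
98 | # (illustrative): TEUTHOLOGY_WAIT=1 ./start.sh
99 |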
--------------------------------------------------------------------------------