├── .coveragerc
├── .dockerignore
├── .github
├── CODEOWNERS
└── workflows
│ ├── ci.yml
│ ├── dependencies.yml
│ ├── dev_container.yml
│ └── integration.yml
├── .gitignore
├── .gitlab-ci.yml
├── .readthedocs.yml
├── .travis.yml
├── LICENSE
├── MANIFEST.in
├── README.rst
├── ansible.cfg
├── beanstalk
└── alpine
│ └── Dockerfile
├── bootstrap
├── build_qemu_image.sh
├── containers
└── teuthology-dev
│ ├── .teuthology.yaml
│ ├── Dockerfile
│ ├── containerized_node.yaml
│ └── teuthology.sh
├── docs
├── COMPONENTS.rst
├── ChangeLog.rst
├── INSTALL.rst
├── LAB_SETUP.rst
├── Makefile
├── README.rst
├── _static
│ ├── create_nodes.py
│ ├── nginx_paddles
│ ├── nginx_pulpito
│ ├── nginx_test_logs
│ └── worker_start.sh
├── _themes
│ └── ceph
│ │ ├── static
│ │ ├── font
│ │ │ ├── ApexSans-Book.eot
│ │ │ ├── ApexSans-Book.svg
│ │ │ ├── ApexSans-Book.ttf
│ │ │ ├── ApexSans-Book.woff
│ │ │ ├── ApexSans-Medium.eot
│ │ │ ├── ApexSans-Medium.svg
│ │ │ ├── ApexSans-Medium.ttf
│ │ │ └── ApexSans-Medium.woff
│ │ └── nature.css_t
│ │ └── theme.conf
├── cephlab.png
├── cephlab.svg
├── commands
│ ├── list.rst
│ ├── teuthology-describe.rst
│ ├── teuthology-dispatcher.rst
│ ├── teuthology-kill.rst
│ ├── teuthology-lock.rst
│ ├── teuthology-ls.rst
│ ├── teuthology-openstack.rst
│ ├── teuthology-prune-logs.rst
│ ├── teuthology-queue.rst
│ ├── teuthology-reimage.rst
│ ├── teuthology-report.rst
│ ├── teuthology-results.rst
│ ├── teuthology-schedule.rst
│ ├── teuthology-suite.rst
│ ├── teuthology-update-inventory.rst
│ ├── teuthology-updatekeys.rst
│ ├── teuthology-wait.rst
│ ├── teuthology-worker.rst
│ └── teuthology.rst
├── conf.py
├── detailed_test_config.rst
├── docker-compose
│ ├── README.md
│ ├── db
│ │ └── 01-init.sh
│ ├── docker-compose.yml
│ ├── start.sh
│ ├── testnode
│ │ ├── Dockerfile
│ │ ├── testnode_start.sh
│ │ ├── testnode_stop.sh
│ │ └── testnode_sudoers
│ └── teuthology
│ │ ├── .teuthology.yaml
│ │ ├── Dockerfile
│ │ ├── containerized_node.yaml
│ │ └── teuthology.sh
├── downburst_vms.rst
├── exporter.rst
├── fragment_merging.rst
├── index.rst
├── intro_testers.rst
├── laptop
│ ├── README.md
│ ├── default-pool.xml
│ ├── front.xml
│ ├── hosts
│ ├── ssh_config
│ ├── targets.sql
│ └── teuthology.yaml
├── libcloud_backend.rst
├── openstack_backend.rst
├── requirements.txt
└── siteconfig.rst
├── examples
├── 3node_ceph.yaml
├── 3node_rgw.yaml
└── parallel_example.yaml
├── hammer.sh
├── openstack-delegate.sh
├── pyproject.toml
├── pytest.ini
├── requirements.txt
├── requirements.yml
├── scripts
├── __init__.py
├── describe.py
├── dispatcher.py
├── exporter.py
├── kill.py
├── lock.py
├── ls.py
├── node_cleanup.py
├── openstack.py
├── prune_logs.py
├── queue.py
├── reimage.py
├── report.py
├── results.py
├── run.py
├── schedule.py
├── suite.py
├── supervisor.py
├── test
│ ├── script.py
│ ├── test_dispatcher_.py
│ ├── test_exporter_.py
│ ├── test_lock.py
│ ├── test_ls.py
│ ├── test_prune_logs.py
│ ├── test_report.py
│ ├── test_results.py
│ ├── test_run.py
│ ├── test_schedule.py
│ ├── test_suite.py
│ ├── test_supervisor_.py
│ └── test_updatekeys.py
├── update_inventory.py
├── updatekeys.py
└── wait.py
├── setup.cfg
├── systemd
├── teuthology-dispatcher@.service
└── teuthology-exporter.service
├── teuthology
├── __init__.py
├── beanstalk.py
├── ceph.conf.template
├── config.py
├── contextutil.py
├── describe_tests.py
├── dispatcher
│ ├── __init__.py
│ ├── supervisor.py
│ └── test
│ │ ├── test_dispatcher.py
│ │ ├── test_reimage_error_mark_machine_down.py
│ │ └── test_supervisor.py
├── exceptions.py
├── exit.py
├── exporter.py
├── job_status.py
├── kill.py
├── lock
│ ├── __init__.py
│ ├── cli.py
│ ├── ops.py
│ ├── query.py
│ ├── test
│ │ ├── __init__.py
│ │ └── test_lock.py
│ └── util.py
├── ls.py
├── misc.py
├── nuke
│ └── __init__.py
├── openstack
│ ├── __init__.py
│ ├── archive-key
│ ├── archive-key.pub
│ ├── bootstrap-teuthology.sh
│ ├── openstack-basic.yaml
│ ├── openstack-buildpackages.yaml
│ ├── openstack-centos-6.5-user-data.txt
│ ├── openstack-centos-7.0-user-data.txt
│ ├── openstack-centos-7.1-user-data.txt
│ ├── openstack-centos-7.2-user-data.txt
│ ├── openstack-centos-7.3-user-data.txt
│ ├── openstack-debian-7.0-user-data.txt
│ ├── openstack-debian-8.0-user-data.txt
│ ├── openstack-opensuse-15.0-user-data.txt
│ ├── openstack-opensuse-15.1-user-data.txt
│ ├── openstack-opensuse-42.1-user-data.txt
│ ├── openstack-opensuse-42.2-user-data.txt
│ ├── openstack-opensuse-42.3-user-data.txt
│ ├── openstack-sle-12.1-user-data.txt
│ ├── openstack-sle-12.2-user-data.txt
│ ├── openstack-sle-12.3-user-data.txt
│ ├── openstack-sle-15.0-user-data.txt
│ ├── openstack-sle-15.1-user-data.txt
│ ├── openstack-teuthology.cron
│ ├── openstack-teuthology.init
│ ├── openstack-ubuntu-12.04-user-data.txt
│ ├── openstack-ubuntu-14.04-user-data.txt
│ ├── openstack-ubuntu-16.04-user-data.txt
│ ├── openstack-user-data.txt
│ ├── setup-openstack.sh
│ └── test
│ │ ├── __init__.py
│ │ ├── archive-on-error.yaml
│ │ ├── noop.yaml
│ │ ├── openstack-integration.py
│ │ ├── resources_hint.yaml
│ │ ├── resources_hint_no_cinder.yaml
│ │ ├── stop_worker.yaml
│ │ ├── suites
│ │ ├── noop
│ │ │ ├── +
│ │ │ └── noop.yaml
│ │ └── nuke
│ │ │ └── +
│ │ ├── test_config.py
│ │ ├── test_openstack.py
│ │ └── user-data-test1.txt
├── orchestra
│ ├── __init__.py
│ ├── cluster.py
│ ├── connection.py
│ ├── console.py
│ ├── daemon
│ │ ├── __init__.py
│ │ ├── cephadmunit.py
│ │ ├── group.py
│ │ ├── state.py
│ │ └── systemd.py
│ ├── monkey.py
│ ├── opsys.py
│ ├── remote.py
│ ├── run.py
│ └── test
│ │ ├── __init__.py
│ │ ├── files
│ │ └── daemon-systemdstate-pid-ps-ef.output
│ │ ├── integration
│ │ ├── __init__.py
│ │ └── test_integration.py
│ │ ├── test_cluster.py
│ │ ├── test_connection.py
│ │ ├── test_console.py
│ │ ├── test_opsys.py
│ │ ├── test_remote.py
│ │ ├── test_run.py
│ │ ├── test_systemd.py
│ │ └── util.py
├── packaging.py
├── parallel.py
├── provision
│ ├── __init__.py
│ ├── cloud
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── openstack.py
│ │ ├── test
│ │ │ ├── test_base.py
│ │ │ ├── test_cloud_init.py
│ │ │ ├── test_cloud_util.py
│ │ │ ├── test_openstack.py
│ │ │ └── test_openstack_userdata_conf.yaml
│ │ └── util.py
│ ├── downburst.py
│ ├── fog.py
│ ├── openstack.py
│ ├── pelagos.py
│ └── test
│ │ ├── test_downburst.py
│ │ ├── test_fog.py
│ │ ├── test_init_provision.py
│ │ └── test_pelagos.py
├── prune.py
├── reimage.py
├── repo_utils.py
├── report.py
├── results.py
├── run.py
├── run_tasks.py
├── safepath.py
├── schedule.py
├── scrape.py
├── suite
│ ├── __init__.py
│ ├── build_matrix.py
│ ├── fragment-merge.lua
│ ├── matrix.py
│ ├── merge.py
│ ├── placeholder.py
│ ├── run.py
│ ├── test
│ │ ├── conftest.py
│ │ ├── suites
│ │ │ └── noop
│ │ │ │ └── noop.yaml
│ │ ├── test_build_matrix.py
│ │ ├── test_init.py
│ │ ├── test_matrix.py
│ │ ├── test_merge.py
│ │ ├── test_placeholder.py
│ │ ├── test_run_.py
│ │ └── test_util.py
│ └── util.py
├── task
│ ├── __init__.py
│ ├── ansible.py
│ ├── args.py
│ ├── background_exec.py
│ ├── buildpackages.py
│ ├── buildpackages
│ │ ├── Makefile
│ │ ├── centos-6.5-user-data.txt
│ │ ├── centos-7.0-user-data.txt
│ │ ├── centos-7.1-user-data.txt
│ │ ├── centos-7.2-user-data.txt
│ │ ├── centos-7.3-user-data.txt
│ │ ├── common.sh
│ │ ├── debian-8.0-user-data.txt
│ │ ├── make-deb.sh
│ │ ├── make-rpm.sh
│ │ ├── opensuse-15.0-user-data.txt
│ │ ├── opensuse-42.1-user-data.txt
│ │ ├── opensuse-42.2-user-data.txt
│ │ ├── opensuse-42.3-user-data.txt
│ │ ├── sle-12.1-user-data.txt
│ │ ├── sle-12.2-user-data.txt
│ │ ├── sle-12.3-user-data.txt
│ │ ├── sle-15.0-user-data.txt
│ │ ├── ubuntu-12.04-user-data.txt
│ │ ├── ubuntu-14.04-user-data.txt
│ │ ├── ubuntu-16.04-user-data.txt
│ │ └── user-data.txt
│ ├── ceph_ansible.py
│ ├── cephmetrics.py
│ ├── clock.py
│ ├── common_fs_utils.py
│ ├── console_log.py
│ ├── dump_ctx.py
│ ├── exec.py
│ ├── full_sequential.py
│ ├── full_sequential_finally.py
│ ├── hadoop.py
│ ├── install
│ │ ├── __init__.py
│ │ ├── bin
│ │ │ ├── adjust-ulimits
│ │ │ ├── daemon-helper
│ │ │ └── stdin-killer
│ │ ├── deb.py
│ │ ├── packages.yaml
│ │ ├── redhat.py
│ │ ├── rpm.py
│ │ └── util.py
│ ├── interactive.py
│ ├── internal
│ │ ├── __init__.py
│ │ ├── check_lock.py
│ │ ├── edit_sudoers.sh
│ │ ├── git_ignore_ssl.py
│ │ ├── lock_machines.py
│ │ ├── redhat.py
│ │ ├── syslog.py
│ │ └── vm_setup.py
│ ├── iscsi.py
│ ├── kernel.py
│ ├── knfsd.py
│ ├── localdir.py
│ ├── lockfile.py
│ ├── loop.py
│ ├── mpi.py
│ ├── nfs.py
│ ├── nop.py
│ ├── parallel.py
│ ├── parallel_example.py
│ ├── pcp.j2
│ ├── pcp.py
│ ├── pexec.py
│ ├── print.py
│ ├── proc_thrasher.py
│ ├── selinux.py
│ ├── sequential.py
│ ├── sleep.py
│ ├── ssh_keys.py
│ ├── tasktest.py
│ ├── tests
│ │ ├── __init__.py
│ │ ├── test_fetch_coredumps.py
│ │ ├── test_locking.py
│ │ └── test_run.py
│ └── timer.py
├── templates
│ ├── email-sleep-before-teardown.jinja2
│ └── rocketchat-sleep-before-teardown.jinja2
├── test
│ ├── __init__.py
│ ├── fake_archive.py
│ ├── fake_fs.py
│ ├── integration
│ │ ├── __init__.py
│ │ └── test_suite.py
│ ├── task
│ │ ├── __init__.py
│ │ ├── test_ansible.py
│ │ ├── test_ceph_ansible.py
│ │ ├── test_console_log.py
│ │ ├── test_install.py
│ │ ├── test_internal.py
│ │ ├── test_kernel.py
│ │ ├── test_pcp.py
│ │ └── test_selinux.py
│ ├── test_config.py
│ ├── test_contextutil.py
│ ├── test_describe_tests.py
│ ├── test_email_sleep_before_teardown.py
│ ├── test_exit.py
│ ├── test_get_distro.py
│ ├── test_get_distro_version.py
│ ├── test_get_multi_machine_types.py
│ ├── test_imports.py
│ ├── test_job_status.py
│ ├── test_kill.py
│ ├── test_ls.py
│ ├── test_misc.py
│ ├── test_packaging.py
│ ├── test_parallel.py
│ ├── test_repo_utils.py
│ ├── test_report.py
│ ├── test_results.py
│ ├── test_run.py
│ ├── test_safepath.py
│ ├── test_schedule.py
│ ├── test_scrape.py
│ ├── test_timer.py
│ └── test_vps_os_vers_parameter_checking.py
├── timer.py
└── util
│ ├── __init__.py
│ ├── compat.py
│ ├── flock.py
│ ├── loggerfile.py
│ ├── scanner.py
│ ├── sentry.py
│ ├── test
│ ├── files
│ │ ├── test_unit_test.xml
│ │ └── test_valgrind.xml
│ ├── test_scanner.py
│ └── test_time.py
│ └── time.py
├── tox.ini
├── update-requirements.sh
└── watch-suite.sh
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit = */test/*
3 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | venv
2 | virtualenv
3 | .tox
4 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @ceph/teuthology
2 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | test:
11 | name: CI on python${{ matrix.python }} via ${{ matrix.os }}
12 | runs-on: ${{ matrix.os }}
13 | strategy:
14 | matrix:
15 | include:
16 | - os: ubuntu-22.04
17 | python: "3.10"
18 | - os: ubuntu-22.04
19 | python: "3.11"
20 | - os: ubuntu-24.04
21 | python: "3.12"
22 | steps:
23 | - uses: actions/checkout@v4
24 | - name: Setup Python
25 | uses: actions/setup-python@v5
26 | with:
27 | python-version: ${{ matrix.python }}
28 | - name: Install tox
29 | run: pip install tox
30 | - name: Run flake8
31 | run: tox -e flake8
32 | - name: Run unit tests
33 | run: tox -e py3
34 | - name: Run docs build
35 | run: tox -e docs
36 |
--------------------------------------------------------------------------------
/.github/workflows/dependencies.yml:
--------------------------------------------------------------------------------
1 | name: dependencies
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | upgrade:
11 | name: Test dependencies
12 | runs-on: ${{ matrix.os }}
13 | strategy:
14 | matrix:
15 | include:
16 | - os: ubuntu-22.04
17 | python: "3.10"
18 | - os: ubuntu-22.04
19 | python: "3.11"
20 | steps:
21 | - name: Set up Python
22 | uses: actions/setup-python@v5
23 | with:
24 | python-version: ${{ matrix.python }}
25 | - name: Checkout default branch
26 | uses: actions/checkout@v4
27 | with:
28 | ref: main
29 | path: teuthology
30 | - name: virtualenv
31 | run: |
32 | pip install --user virtualenv
33 | virtualenv ./virtualenv
34 | cd ./virtualenv/lib/python*
35 | touch no-global-site-packages.txt
36 | working-directory: ./teuthology
37 | - name: Initial bootstrap
38 | run: ./bootstrap install
39 | working-directory: ./teuthology
40 | - name: Move initial repository
41 | run: mv teuthology teuthology.orig
42 | - name: Checkout desired ref
43 | uses: actions/checkout@v4
44 | with:
45 | path: teuthology
46 | - name: Move virtualenv to new checkout
47 | run: mv ./teuthology.orig/virtualenv ./teuthology/
48 | - name: Re-run bootstrap
49 | run: ./bootstrap install
50 | working-directory: ./teuthology
51 |
--------------------------------------------------------------------------------
/.github/workflows/dev_container.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: dev_container
3 | on:
4 | push:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | docker:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v4
15 | - name: Set up QEMU
16 | uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
17 | - name: Set up Docker Buildx
18 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
19 | - name: Login to Quay.io
20 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
21 | with:
22 | registry: quay.io
23 | username: ${{ secrets.QUAY_USERNAME }}
24 | password: ${{ secrets.QUAY_ROBOT_TOKEN }}
25 | - name: Build and push
26 | uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
27 | env:
28 | QUAY_URI: quay.io/ceph-infra/teuthology-dev
29 | QUAY_TAG: ${{ github.event_name == 'pull_request' && github.head_ref || github.ref_name }}
30 | with:
31 | context: .
32 | file: containers/teuthology-dev/Dockerfile
33 | platforms: linux/amd64,linux/arm64
34 | push: true
35 | tags: ${{ env.QUAY_URI }}:${{ env.QUAY_TAG }}
36 | outputs: type=image,name=target
37 |
--------------------------------------------------------------------------------
/.github/workflows/integration.yml:
--------------------------------------------------------------------------------
1 | name: integration
2 | on:
3 | pull_request:
4 | workflow_dispatch:
5 | jobs:
6 | test:
7 | runs-on: ubuntu-24.04
8 | steps:
9 | - uses: actions/checkout@v4
10 | - name: Make archive directory
11 | run: mkdir /tmp/archive_dir
12 | - name: Test using docker-compose
13 | run: ./start.sh
14 | working-directory: ./docs/docker-compose
15 | - name: Rename Directory
16 | # Replace ":" with "_" everywhere in directory path.
17 | # This needs to be done because GA does not support ":" colon character in artifacts (like in /root-2025-03-06_18:47:26-teuthology:no-ceph-main-distro-default-testnode).
18 | # Invalid characters include: Double quote ", Colon :, Less than <, Greater than >, Vertical bar |, Asterisk *, Question mark ?, Carriage return \r, Line feed \n
19 | if: always()
20 | run: |
21 | for DIR in /tmp/archive_dir/root-*; do
22 | SAFE_DIR="${DIR//:/_}" # Replace in '/tmp/archive_dir/root-2025-03-06_18:47:26-teuthology:no-ceph-main-distro-default-testnode'
23 | if [ "$DIR" != "$SAFE_DIR" ]; then
24 | mv "$DIR" "$SAFE_DIR"
25 | fi
26 | done
27 | - name: Upload teuthology archive logs
28 | uses: actions/upload-artifact@v4
29 | if: always()
30 | with:
31 | name: teuthology-logs
32 | path: |
33 | /tmp/archive_dir/*
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | .#*
3 | ## the next line needs to start with a backslash to avoid looking like
4 | ## a comment
5 | \#*#
6 | .*.swp
7 |
8 | *.pyc
9 | *.pyo
10 | .tox
11 |
12 | /*.egg-info
13 | /virtualenv
14 | /build
15 | /*.yaml
16 | docs/build
17 |
18 | .ropeproject
19 | .coverage
20 |
21 | # autogenerated docs from sphinx-apidoc
22 | docs/modules.rst
23 | docs/teuthology.rst
24 | docs/teuthology.*.rst
25 |
26 | # PyCharm
27 | .idea
28 |
29 | # vscode
30 | .vscode/
31 |
32 | .ansible
33 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | teuthology:
2 | tags: [ ceph-workbench ]
3 | script: "git clean -ffqdx ; ./bootstrap install ; unset OS_AUTH_URL ; source virtualenv/bin/activate ; pip install tox ; tox"
4 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | version: 2
6 | formats: []
7 | build:
8 | os: ubuntu-22.04
9 | tools:
10 | python: "3.10"
11 | python:
12 | install:
13 | - method: pip
14 | path: .
15 | extra_requirements:
16 | - orchestra
17 | - requirements: docs/requirements.txt
18 | sphinx:
19 | builder: html
20 | configuration: docs/conf.py
21 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | env: HOME=/home/travis
2 |
3 | sudo: required
4 | dist: trusty
5 |
6 | before_install:
7 | - sudo apt-get -qq update
8 | - ./bootstrap install
9 |
10 | language: python
11 | python:
12 | - 2.7
13 |
14 | install:
15 | - pip install tox
16 |
17 | script: tox -rv
18 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014 Red Hat, Inc.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.rst
2 | include requirements.txt
3 | include tox.ini
4 | include pytest.ini
5 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ===================================================
2 | `Teuthology` -- The Ceph integration test framework
3 | ===================================================
4 |
5 |
 6 | Welcome! Teuthology's documentation is primarily hosted at `docs.ceph.com
 7 | <https://docs.ceph.com/projects/teuthology/>`__.
8 |
 9 | You can also look at docs `inside this repository <docs/>`__, but note that
 10 | GitHub's `RST <https://en.wikipedia.org/wiki/ReStructuredText>`__ rendering is quite
11 | limited. Mainly that means that links between documents will be broken.
12 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | # Store collections in this directory. This is to avoid potential compatibility
3 | # issues between differently-versioned ansible processes.
4 | collections_path = .ansible
5 |
--------------------------------------------------------------------------------
/beanstalk/alpine/Dockerfile:
--------------------------------------------------------------------------------
1 | # For beanstalkd 1.12 use edge branch
2 | #FROM alpine:edge
3 |
4 | FROM alpine:3.12.3
5 |
6 | MAINTAINER Kyrylo Shatskyy
7 |
8 | RUN apk update && apk add beanstalkd beanstalkd-doc
9 |
10 | ENV BEANSTALK_ADDR "0.0.0.0"
11 | ENV BEANSTALK_PORT "11300"
12 |
13 | CMD /usr/bin/beanstalkd -V -l $BEANSTALK_ADDR -p $BEANSTALK_PORT
14 |
--------------------------------------------------------------------------------
/build_qemu_image.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh -x
2 | set -e
3 |
4 | IMAGE_URL=http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-disk1.img
5 |
6 | wget -O base.qcow2 $IMAGE_URL
7 |
8 | image=base.raw
9 | qemu-img convert -O raw base.qcow2 $image
10 | rm -f base.qcow2
11 |
12 | # Note: this assumes that sector size is 512, and that there's only one
13 | # partition. very brittle.
14 | START_SECT=$(fdisk -lu $image | grep ^$image | awk '{print $3}')
15 | START_BYTE=$(echo "$START_SECT * 512" | bc)
16 |
17 | root=/tmp/$$
18 |
# Best-effort teardown for the chroot built below. Runs via the INT/TERM/EXIT
# trap, so it may fire before setup finished; each early step is suffixed
# with '|| true' to keep going past steps that never happened.
# Order matters: restore the guest's resolv.conf symlink while the chroot is
# still usable, unmount the pseudo-filesystems, then the image, then remove
# the mount point.
cleanup() {
	sudo chroot $root rm -f /etc/resolv.conf || true
	sudo chroot $root ln -s ../run/resolvconf/resolv.conf /etc/resolv.conf || true
	sudo umount $root/proc || true
	sudo umount $root/sys || true
	sudo umount $root/dev/pts || true
	sudo umount $root
	sudo rmdir $root
}
28 | trap cleanup INT TERM EXIT
29 |
30 | sudo mkdir $root
31 | sudo mount -o loop,offset=$START_BYTE $image $root
32 |
33 | # set up chroot
34 | sudo mount -t proc proc $root/proc
35 | sudo mount -t sysfs sysfs $root/sys
36 | sudo mount -t devpts devptr $root/dev/pts
37 |
38 | # set up network access
39 | sudo chroot $root rm /etc/resolv.conf
40 | sudo cp /etc/resolv.conf $root/etc/resolv.conf
41 |
42 | # packages
43 | # These should be kept in sync with ceph-qa-chef.git/cookbooks/ceph-qa/default.rb
44 | sudo chroot $root apt-get -y --force-yes install iozone3 bonnie++ dbench \
45 | tiobench build-essential attr libtool automake gettext uuid-dev \
46 | libacl1-dev bc xfsdump dmapi xfslibs-dev
47 |
48 | # install ltp without ltp-network-test, so we don't pull in xinetd and
49 | # a bunch of other unnecessary stuff
50 | sudo chroot $root apt-get -y --force-yes --no-install-recommends install ltp-kernel-test
51 |
52 | # add 9p fs support
53 | sudo chroot $root apt-get -y --force-yes install linux-image-extra-virtual
54 |
55 | cleanup
56 | trap - INT TERM EXIT
57 |
58 | qemu-img convert -O qcow2 $image output.qcow2
59 | rm -f $image
60 |
61 | exit 0
62 |
--------------------------------------------------------------------------------
/containers/teuthology-dev/.teuthology.yaml:
--------------------------------------------------------------------------------
1 | queue_host: beanstalk
2 | queue_port: 11300
3 | lock_server: http://paddles:8080
4 | results_server: http://paddles:8080
5 | results_ui_server: http://pulpito:8081/
6 | teuthology_path: /teuthology
7 | archive_base: /archive_dir
8 | reserve_machines: 0
9 | lab_domain: ''
10 |
--------------------------------------------------------------------------------
/containers/teuthology-dev/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:noble
2 | ENV DEBIAN_FRONTEND=noninteractive
3 | ENV LANG=C.UTF-8
4 | RUN apt-get update && \
5 | apt-get install -y \
6 | git \
7 | qemu-utils \
8 | python3-dev \
9 | libssl-dev \
10 | ipmitool \
11 | python3-pip \
12 | python3-venv \
13 | vim \
14 | libev-dev \
15 | libvirt-dev \
16 | libffi-dev \
17 | libyaml-dev \
18 | locales \
19 | lsb-release && \
20 | apt-get clean all && \
21 | locale-gen $LC_ALL
22 | WORKDIR /teuthology
23 | COPY requirements.txt requirements.yml ansible.cfg bootstrap /teuthology/
24 | RUN \
25 | cd /teuthology && \
26 | mkdir ../archive_dir && \
27 | mkdir log && \
28 | chmod +x /teuthology/bootstrap && \
29 | PIP_INSTALL_FLAGS="-r requirements.txt" ./bootstrap
30 | COPY . /teuthology
31 | RUN \
32 | git config -f ./.git/config --unset 'http.https://github.com/.extraheader' && \
33 | ./bootstrap
34 | COPY containers/teuthology-dev/containerized_node.yaml /teuthology
35 | COPY containers/teuthology-dev/.teuthology.yaml /root
36 | COPY containers/teuthology-dev/teuthology.sh /
37 | RUN \
38 | mkdir $HOME/.ssh && \
39 | touch $HOME/.ssh/id_rsa && \
40 | chmod 600 $HOME/.ssh/id_rsa && \
41 | echo "StrictHostKeyChecking=no" > $HOME/.ssh/config && \
42 | echo "UserKnownHostsFile=/dev/null" >> $HOME/.ssh/config
43 | ENTRYPOINT /teuthology.sh
44 |
--------------------------------------------------------------------------------
/containers/teuthology-dev/containerized_node.yaml:
--------------------------------------------------------------------------------
1 | overrides:
2 | ansible.cephlab:
3 | skip_tags: "timezone,nagios,monitoring-scripts,ssh,hostname,pubkeys,zap,sudoers,kerberos,selinux,lvm,ntp-client,resolvconf,packages,cpan,nfs"
4 | vars:
5 | containerized_node: true
6 | ansible_user: root
7 | cm_user: root
8 | start_rpcbind: false
9 | cephadm:
10 | image: quay.ceph.io/ceph-ci/ceph:main
11 | osd_method: raw
12 | no_cgroups_split: true
13 |
--------------------------------------------------------------------------------
/containers/teuthology-dev/teuthology.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | set -e
3 | source /teuthology/virtualenv/bin/activate
4 | set -x
5 | cat /run/secrets/id_rsa > $HOME/.ssh/id_rsa
6 | if [ -n "$TEUTHOLOGY_TESTNODES" ]; then
7 | for node in $(echo $TEUTHOLOGY_TESTNODES | tr , ' '); do
8 | teuthology-update-inventory -m "$TEUTHOLOGY_MACHINE_TYPE" "$node"
9 | done
10 | TEUTHOLOGY_CONF=${TEUTHOLOGY_CONF:-}
11 | else
12 | TEUTHOLOGY_CONF=/teuthology/containerized_node.yaml
13 | fi
14 | export TEUTHOLOGY_MACHINE_TYPE=${TEUTHOLOGY_MACHINE_TYPE:-testnode}
15 | if [ "$TEUTHOLOGY_SUITE" != "none" ]; then
16 | if [ -n "$TEUTHOLOGY_BRANCH" ]; then
17 | TEUTH_BRANCH_FLAG="--teuthology-branch $TEUTHOLOGY_BRANCH"
18 | fi
19 | teuthology-suite -v \
20 | $TEUTH_BRANCH_FLAG \
21 | -m "$TEUTHOLOGY_MACHINE_TYPE" \
22 | --newest 100 \
23 | --ceph "${TEUTHOLOGY_CEPH_BRANCH:-main}" \
24 | --ceph-repo "${TEUTHOLOGY_CEPH_REPO:-https://github.com/ceph/ceph.git}" \
25 | --suite "${TEUTHOLOGY_SUITE:-teuthology:no-ceph}" \
26 | --suite-branch "${TEUTHOLOGY_SUITE_BRANCH:-main}" \
27 | --suite-repo "${TEUTHOLOGY_SUITE_REPO:-https://github.com/ceph/ceph.git}" \
28 | --filter-out "libcephfs,kclient" \
29 | --force-priority \
30 | --seed 349 \
31 | ${TEUTHOLOGY_SUITE_EXTRA_ARGS} \
32 | $TEUTHOLOGY_CONF
33 | DISPATCHER_EXIT_FLAG='--exit-on-empty-queue'
34 | teuthology-queue -m $TEUTHOLOGY_MACHINE_TYPE -s | \
35 | python3 -c "import sys, json; assert json.loads(sys.stdin.read())['count'] > 0, 'queue is empty!'"
36 | fi
37 | teuthology-dispatcher -v \
38 | --log-dir /teuthology/log \
39 | --tube "$TEUTHOLOGY_MACHINE_TYPE" \
40 | $DISPATCHER_EXIT_FLAG
41 |
--------------------------------------------------------------------------------
/docs/ChangeLog.rst:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 |
4 | 0.1.0
5 | -----
6 | * (Actual changelog coming soon)
7 |
--------------------------------------------------------------------------------
/docs/_static/create_nodes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # A sample script that can be used while setting up a new teuthology lab
3 | # This script will connect to the machines in your lab, and populate a
4 | # paddles instance with their information.
5 | #
6 | # You WILL need to modify it.
7 |
8 | import logging
9 | import sys
10 | from teuthology.orchestra.remote import Remote
11 | from teuthology.lock.ops import update_inventory
12 |
13 | paddles_url = 'http://paddles.example.com/nodes/'
14 |
15 | machine_type = 'typica'
16 | lab_domain = 'example.com'
17 | # Don't change the user. It won't work at this time.
18 | user = 'ubuntu'
 19 | # We are populating 'typica003' -> 'typica191' (range() excludes the stop value)
20 | machine_index_range = range(3, 192)
21 |
22 | log = logging.getLogger(sys.argv[0])
23 | logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
24 | logging.WARNING)
25 |
26 |
def get_shortname(machine_type, index):
    """
    Build a short hostname from a machine type and a numeric index,
    zero-padding the index to at least three digits:
    get_shortname('magna', 3) == 'magna003'

    Modify to suit your lab's naming scheme.
    """
    padded_index = str(index).rjust(3, '0')
    return machine_type + padded_index
35 |
36 |
def get_info(user, fqdn):
    """Connect to ``user@fqdn`` and return that host's inventory info."""
    remote = Remote('{}@{}'.format(user, fqdn))
    return remote.inventory_info
40 |
41 |
def main():
    """Create or update a paddles inventory entry for every lab machine.

    Unreachable nodes are still recorded, but marked down, with the
    exception stored in their description so the failure is visible in
    paddles.
    """
    shortnames = [get_shortname(machine_type, i) for i in machine_index_range]
    fqdns = ['.'.join((name, lab_domain)) for name in shortnames]
    for fqdn in fqdns:
        log.info("Creating %s", fqdn)
        base_info = dict(
            name=fqdn,
            locked=True,
            locked_by='initial@setup',
            machine_type=machine_type,
            description="Initial node creation",
        )
        try:
            info = get_info(user, fqdn)
            base_info.update(info)
            base_info['up'] = True
        except Exception as exc:
            # Keep going on unreachable nodes instead of aborting the run.
            # log.exception preserves the traceback (the original log.error
            # with eager .format() discarded it), which makes diagnosing
            # connection failures much easier.
            log.exception("%s is down", fqdn)
            base_info['up'] = False
            base_info['description'] = repr(exc)
        update_inventory(base_info)


if __name__ == '__main__':
    main()
66 |
--------------------------------------------------------------------------------
/docs/_static/nginx_paddles:
--------------------------------------------------------------------------------
1 | server {
2 | server_name paddles.example.com;
3 | proxy_send_timeout 600;
4 | proxy_connect_timeout 240;
5 | location / {
6 | proxy_pass http://paddles.example.com:8080/;
7 | proxy_set_header Host $host;
8 | proxy_set_header X-Real-IP $remote_addr;
9 | }
10 |
11 | }
12 |
--------------------------------------------------------------------------------
/docs/_static/nginx_pulpito:
--------------------------------------------------------------------------------
1 | server {
2 | server_name pulpito.example.com;
3 | proxy_send_timeout 600;
4 | proxy_connect_timeout 240;
5 | location / {
6 | proxy_pass http://pulpito.example.com:8081/;
7 | proxy_set_header Host $host;
8 | proxy_set_header X-Real-IP $remote_addr;
9 | }
10 |
11 | }
12 |
--------------------------------------------------------------------------------
/docs/_static/nginx_test_logs:
--------------------------------------------------------------------------------
1 | server {
2 | allow all;
3 | autoindex on;
4 | server_name test_logs.example.com;
5 | root /home/teuthworker/archive;
6 | default_type text/plain;
7 | }
8 |
--------------------------------------------------------------------------------
/docs/_static/worker_start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # A simple script used by Red Hat to start teuthology-worker processes.
4 |
5 | ARCHIVE=${ARCHIVE:-"$HOME/archive"}
6 | WORKER_LOGS=$ARCHIVE/worker_logs
7 |
# Launch N teuthology-worker processes, in the background, for one tube.
# $1 = tube name, $2 = number of workers
function start_workers_for_tube {
    local tube=$1
    local count=$2
    echo "Starting $count workers for $tube"
    local i
    for ((i = 1; i <= count; i++))
    do
        teuthology-worker -v --archive-dir $ARCHIVE --tube $tube --log-dir $WORKER_LOGS &
    done
}
15 |
16 | function start_all {
17 | start_workers_for_tube plana 50
18 | start_workers_for_tube mira 50
19 | start_workers_for_tube vps 80
20 | start_workers_for_tube burnupi 10
21 | start_workers_for_tube tala 5
22 | start_workers_for_tube saya 10
23 | start_workers_for_tube multi 100
24 | }
25 |
26 | function main {
27 | printf '%s\n' "$*"
28 | if [[ -z "$*" ]]
29 | then
30 | start_all
31 | elif [ ! -z "$2" ] && [ "$2" -gt "0" ]
32 | then
33 | start_workers_for_tube $1 $2
34 | else
35 | echo "usage: $0 [tube_name number_of_workers]" >&2
36 | exit 1
37 | fi
38 | }
39 |
40 | main "$@"
41 |
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Book.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/docs/_themes/ceph/static/font/ApexSans-Book.eot
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Book.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/docs/_themes/ceph/static/font/ApexSans-Book.ttf
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Book.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/docs/_themes/ceph/static/font/ApexSans-Book.woff
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Medium.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/docs/_themes/ceph/static/font/ApexSans-Medium.eot
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Medium.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/docs/_themes/ceph/static/font/ApexSans-Medium.ttf
--------------------------------------------------------------------------------
/docs/_themes/ceph/static/font/ApexSans-Medium.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/docs/_themes/ceph/static/font/ApexSans-Medium.woff
--------------------------------------------------------------------------------
/docs/_themes/ceph/theme.conf:
--------------------------------------------------------------------------------
1 | [theme]
2 | inherit = basic
3 | stylesheet = nature.css
4 | pygments_style = tango
5 |
--------------------------------------------------------------------------------
/docs/cephlab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/docs/cephlab.png
--------------------------------------------------------------------------------
/docs/commands/list.rst:
--------------------------------------------------------------------------------
1 | Command line interface (CLI)
2 | ============================
3 |
4 | Help output of the available command line tools for teuthology.
5 |
6 | .. toctree::
7 | :glob:
8 |
9 | *
10 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-describe.rst:
--------------------------------------------------------------------------------
1 | teuthology-describe
2 | ===================
3 |
4 | .. program-output:: teuthology-describe --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-dispatcher.rst:
--------------------------------------------------------------------------------
1 | teuthology-dispatcher
2 | =====================
3 |
4 | .. program-output:: teuthology-dispatcher --help
5 |
6 | trouble-shooting notes:
7 | =======================
8 |
9 | - Github unreachable kills dispatcher - The dispatcher might be killed when github becomes unreachable, e.g., https://tracker.ceph.com/issues/54366
--------------------------------------------------------------------------------
/docs/commands/teuthology-kill.rst:
--------------------------------------------------------------------------------
1 | teuthology-kill
2 | ===============
3 |
4 | .. program-output:: teuthology-kill --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-lock.rst:
--------------------------------------------------------------------------------
1 | teuthology-lock
2 | ===============
3 |
4 | .. program-output:: teuthology-lock --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-ls.rst:
--------------------------------------------------------------------------------
1 | teuthology-ls
2 | =============
3 |
4 | .. program-output:: teuthology-ls --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-openstack.rst:
--------------------------------------------------------------------------------
1 | teuthology-openstack
2 | ====================
3 |
4 | .. program-output:: teuthology-openstack --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-prune-logs.rst:
--------------------------------------------------------------------------------
1 | teuthology-prune-logs
2 | =====================
3 |
4 | .. program-output:: teuthology-prune-logs --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-queue.rst:
--------------------------------------------------------------------------------
1 | teuthology-queue
2 | ================
3 |
4 | .. program-output:: teuthology-queue --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-reimage.rst:
--------------------------------------------------------------------------------
1 | teuthology-reimage
2 | ==================
3 |
4 | .. program-output:: teuthology-reimage --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-report.rst:
--------------------------------------------------------------------------------
1 | teuthology-report
2 | =================
3 |
4 | .. program-output:: teuthology-report --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-results.rst:
--------------------------------------------------------------------------------
1 | teuthology-results
2 | ==================
3 |
4 | .. program-output:: teuthology-results --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-schedule.rst:
--------------------------------------------------------------------------------
1 | teuthology-schedule
2 | ===================
3 |
4 | .. program-output:: teuthology-schedule --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-suite.rst:
--------------------------------------------------------------------------------
1 | teuthology-suite
2 | ================
3 |
4 | .. program-output:: teuthology-suite --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-update-inventory.rst:
--------------------------------------------------------------------------------
1 | teuthology-update-inventory
2 | ===========================
3 |
4 | .. program-output:: teuthology-update-inventory --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-updatekeys.rst:
--------------------------------------------------------------------------------
1 | teuthology-updatekeys
2 | =====================
3 |
4 | .. program-output:: teuthology-updatekeys --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-wait.rst:
--------------------------------------------------------------------------------
1 | teuthology-wait
2 | =====================
3 |
4 | .. program-output:: teuthology-wait --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology-worker.rst:
--------------------------------------------------------------------------------
1 | teuthology-worker
2 | =================
3 |
4 | .. program-output:: teuthology-worker --help
5 |
--------------------------------------------------------------------------------
/docs/commands/teuthology.rst:
--------------------------------------------------------------------------------
1 | teuthology
2 | ==========
3 |
4 | .. program-output:: teuthology --help
5 |
--------------------------------------------------------------------------------
/docs/docker-compose/db/01-init.sh:
--------------------------------------------------------------------------------
# Postgres first-boot init script, run from /docker-entrypoint-initdb.d by
# the official postgres image: create the application role and database for
# paddles and give the role full access.
set -e
export PGPASSWORD=$POSTGRES_PASSWORD;
# ON_ERROR_STOP makes psql exit non-zero on the first failed statement, so
# set -e aborts container initialization instead of continuing half-done.
# NOTE: no comments inside the heredoc — psql would treat them as input.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
  CREATE USER $APP_DB_USER WITH PASSWORD '$APP_DB_PASS';
  CREATE DATABASE $APP_DB_NAME;
  GRANT ALL PRIVILEGES ON DATABASE $APP_DB_NAME TO $APP_DB_USER;
  \connect $APP_DB_NAME $APP_DB_USER
EOSQL
--------------------------------------------------------------------------------
/docs/docker-compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | postgres:
5 | image: postgres:14
6 | healthcheck:
7 | test: [ "CMD", "pg_isready", "-q", "-d", "paddles", "-U", "admin" ]
8 | timeout: 5s
9 | interval: 10s
10 | retries: 2
11 | environment:
12 | - POSTGRES_USER=root
13 | - POSTGRES_PASSWORD=password
14 | - APP_DB_USER=admin
15 | - APP_DB_PASS=password
16 | - APP_DB_NAME=paddles
17 | volumes:
18 | - ./db:/docker-entrypoint-initdb.d/
19 | ports:
20 | - 5432:5432
21 | paddles:
22 | image: quay.io/ceph-infra/paddles
23 | environment:
24 | PADDLES_SERVER_HOST: 0.0.0.0
25 | PADDLES_SQLALCHEMY_URL: postgresql+psycopg2://admin:password@postgres:5432/paddles
26 | depends_on:
27 | postgres:
28 | condition: service_healthy
29 | links:
30 | - postgres
31 | healthcheck:
32 | test: ["CMD", "curl", "-f", "http://0.0.0.0:8080"]
33 | timeout: 5s
34 | interval: 30s
35 | retries: 2
36 | ports:
37 | - 8080:8080
38 | pulpito:
39 | image: quay.io/ceph-infra/pulpito
40 | environment:
41 | PULPITO_PADDLES_ADDRESS: http://paddles:8080
42 | depends_on:
43 | paddles:
44 | condition: service_healthy
45 | links:
46 | - paddles
47 | healthcheck:
48 | test: ["CMD", "curl", "-f", "http://0.0.0.0:8081"]
49 | timeout: 5s
50 | interval: 10s
51 | retries: 2
52 | ports:
53 | - 8081:8081
54 | beanstalk:
55 | build: ../../beanstalk/alpine
56 | ports:
57 | - "11300:11300"
58 | teuthology:
59 | build:
60 | context: ../../
61 | dockerfile: ./docs/docker-compose/teuthology/Dockerfile
62 | args:
63 | SSH_PRIVKEY_FILE: $SSH_PRIVKEY_FILE
64 | depends_on:
65 | paddles:
66 | condition: service_healthy
67 | links:
68 | - paddles
69 | - beanstalk
70 | environment:
71 | SSH_PRIVKEY:
72 | SSH_PRIVKEY_FILE:
73 | MACHINE_TYPE:
74 | TESTNODES:
75 | TEUTHOLOGY_WAIT:
76 | TEUTH_BRANCH:
77 | volumes:
78 | - /tmp/archive_dir:/archive_dir:rw
79 | testnode:
80 | build:
81 | context: ./testnode
82 | dockerfile: ./Dockerfile
83 | deploy:
84 | replicas: 3
85 | depends_on:
86 | paddles:
87 | condition: service_healthy
88 | links:
89 | - paddles
90 | ports:
91 | - "22"
92 | environment:
93 | SSH_PUBKEY:
94 | platform: linux/amd64
95 |
--------------------------------------------------------------------------------
/docs/docker-compose/start.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Prepare and launch the docker-compose teuthology stack: optionally clone an
# ansible inventory repo, stage an SSH keypair shared between the teuthology
# and testnode containers, then run `docker compose up`.
set -e
export TEUTHOLOGY_BRANCH=${TEUTHOLOGY_BRANCH:-$(git branch --show-current)}
export TEUTH_BRANCH=${TEUTHOLOGY_BRANCH}
if [ -n "$ANSIBLE_INVENTORY_REPO" ]; then
  # e.g. "https://host/org/repo.git" -> "repo"
  basename=$(basename $ANSIBLE_INVENTORY_REPO | cut -d. -f1)
  if [ ! -d "$basename" ]; then
    git clone \
      --depth 1 \
      $ANSIBLE_INVENTORY_REPO
  fi
  mkdir -p teuthology/ansible_inventory
  cp -rf $basename/ansible/ teuthology/ansible_inventory
  # Some inventory repos name the directory "inventory" rather than "hosts".
  if [ ! -d teuthology/ansible_inventory/hosts ]; then
    mv -f teuthology/ansible_inventory/inventory teuthology/ansible_inventory/hosts
  fi
fi
# Make the hosts and secrets directories, so that the COPY instruction in the
# Dockerfile does not cause a build failure when not using this feature.
mkdir -p teuthology/ansible_inventory/hosts teuthology/ansible_inventory/secrets

if [ -n "$CUSTOM_CONF" ]; then
  cp "$CUSTOM_CONF" teuthology/
fi

# Generate an SSH keypair to use if necessary
if [ -z "$SSH_PRIVKEY_PATH" ]; then
  SSH_PRIVKEY_PATH=$(mktemp -u /tmp/teuthology-ssh-key-XXXXXX)
  ssh-keygen -t rsa -N '' -f $SSH_PRIVKEY_PATH
  export SSH_PRIVKEY=$(cat $SSH_PRIVKEY_PATH)
  export SSH_PUBKEY=$(cat $SSH_PRIVKEY_PATH.pub)
  export SSH_PRIVKEY_FILE=id_rsa
else
  # Reuse the caller-provided key; derive the in-container filename from it.
  export SSH_PRIVKEY=$(cat $SSH_PRIVKEY_PATH)
  export SSH_PRIVKEY_FILE=$(basename $SSH_PRIVKEY_PATH | cut -d. -f1)
fi

# Unless asked to wait around afterwards, tear the stack down when the
# teuthology container exits and propagate its exit code.
if [ -z "$TEUTHOLOGY_WAIT" ]; then
  DC_EXIT_FLAG='--abort-on-container-exit --exit-code-from teuthology'
  DC_AUTO_DOWN_CMD='docker compose down'
fi
export TEUTHOLOGY_WAIT

trap "docker compose down" SIGINT
# DC_EXIT_FLAG is intentionally unquoted: it expands to two separate flags.
docker compose up \
  --build \
  $DC_EXIT_FLAG
$DC_AUTO_DOWN_CMD
49 |
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 | ENV DEBIAN_FRONTEND=noninteractive
3 | RUN apt update && \
4 | apt -y install \
5 | sudo \
6 | openssh-server \
7 | hostname \
8 | curl \
9 | python3-pip \
10 | apache2 \
11 | nfs-kernel-server && \
12 | apt clean all
13 | COPY testnode_start.sh /
14 | COPY testnode_stop.sh /
15 | COPY testnode_sudoers /etc/sudoers.d/teuthology
16 | RUN \
17 | ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -N '' && \
18 | sed -i 's/#PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config && \
19 | mkdir -p /root/.ssh && \
20 | chmod 700 /root/.ssh && \
21 | useradd -g sudo ubuntu && \
22 | mkdir -p /home/ubuntu/.ssh && \
23 | chmod 700 /home/ubuntu/.ssh && \
24 | chown -R ubuntu /home/ubuntu
25 | EXPOSE 22
26 | ENTRYPOINT /testnode_start.sh
27 |
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/testnode_start.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
# Testnode container entrypoint: install the authorized SSH key, register
# this node with paddles, then run sshd in the foreground as PID 1.
set -x
echo "$SSH_PUBKEY" > /root/.ssh/authorized_keys
echo "$SSH_PUBKEY" > /home/ubuntu/.ssh/authorized_keys
chown ubuntu /home/ubuntu/.ssh/authorized_keys
. /etc/os-release
if [ $ID = 'centos' ]; then
  # e.g. "8" -> "8.stream" — the version string paddles expects for CentOS.
  VERSION_ID=${VERSION_ID}.stream
fi
payload="{\"name\": \"$(hostname)\", \"machine_type\": \"testnode\", \"up\": true, \"locked\": false, \"os_type\": \"${ID}\", \"os_version\": \"${VERSION_ID}\"}"
# Retry registration briefly: paddles may still be starting up.
for i in $(seq 1 5); do
  echo "attempt $i"
  curl -v -f -d "$payload" http://paddles:8080/nodes/ && break
  sleep 1
done
mkdir -p /run/sshd
exec /usr/sbin/sshd -D
18 |
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/testnode_stop.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
# Mark this testnode as down in paddles, retrying briefly in case the
# service is momentarily unreachable.
set -x
node=$(hostname)
payload="{\"name\": \"$node\", \"machine_type\": \"testnode\", \"up\": false}"
attempt=1
while [ "$attempt" -le 5 ]; do
  echo "attempt $attempt"
  if curl -s -f -X PUT -d "$payload" http://paddles:8080/nodes/$node/; then
    break
  fi
  sleep 1
  attempt=$((attempt + 1))
done
10 |
--------------------------------------------------------------------------------
/docs/docker-compose/testnode/testnode_sudoers:
--------------------------------------------------------------------------------
1 | %sudo ALL=(ALL) NOPASSWD: ALL
2 | # For ansible pipelining
3 | Defaults !requiretty
4 | Defaults visiblepw
5 |
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/.teuthology.yaml:
--------------------------------------------------------------------------------
1 | queue_host: beanstalk
2 | queue_port: 11300
3 | lock_server: http://paddles:8080
4 | results_server: http://paddles:8080
5 | results_ui_server: http://pulpito:8081/
6 | teuthology_path: /teuthology
7 | archive_base: /archive_dir
8 | reserve_machines: 0
9 | lab_domain: ''
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 | ARG SSH_PRIVKEY_FILE=id_ed25519
3 | ENV DEBIAN_FRONTEND=noninteractive
4 | ENV LANG=C.UTF-8
5 | RUN apt-get update && \
6 | apt-get install -y \
7 | git \
8 | qemu-utils \
9 | python3-dev \
10 | libssl-dev \
11 | ipmitool \
12 | python3-pip \
13 | python3-venv \
14 | vim \
15 | locales-all \
16 | libev-dev \
17 | libvirt-dev \
18 | libffi-dev \
19 | libyaml-dev \
20 | locales \
21 | lsb-release && \
22 | apt-get clean all && \
23 | locale-gen $LC_ALL
24 | WORKDIR /teuthology
25 | COPY requirements.txt requirements.yml ansible.cfg bootstrap /teuthology/
26 | RUN \
27 | cd /teuthology && \
28 | mkdir ../archive_dir && \
29 | mkdir log && \
30 | chmod +x /teuthology/bootstrap && \
31 | PIP_INSTALL_FLAGS="-r requirements.txt" ./bootstrap
32 | COPY . /teuthology
33 | RUN \
34 | ./bootstrap
35 | COPY docs/docker-compose/teuthology/containerized_node.yaml /teuthology
36 | COPY docs/docker-compose/teuthology/.teuthology.yaml /root
37 | COPY docs/docker-compose/teuthology/teuthology.sh /
38 | RUN mkdir -p /etc/ansible
39 | COPY docs/docker-compose/teuthology/ansible_inventory/hosts /etc/ansible/
40 | COPY docs/docker-compose/teuthology/ansible_inventory/secrets /etc/ansible/
41 | RUN \
42 | mkdir $HOME/.ssh && \
43 | touch $HOME/.ssh/${SSH_PRIVKEY_FILE} && \
44 | chmod 600 $HOME/.ssh/${SSH_PRIVKEY_FILE} && \
45 | echo "StrictHostKeyChecking=no" > $HOME/.ssh/config && \
46 | echo "UserKnownHostsFile=/dev/null" >> $HOME/.ssh/config
47 | ENTRYPOINT /teuthology.sh
48 |
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/containerized_node.yaml:
--------------------------------------------------------------------------------
1 | overrides:
2 | ansible.cephlab:
3 | skip_tags: "timezone,nagios,monitoring-scripts,ssh,hostname,pubkeys,zap,sudoers,kerberos,selinux,lvm,ntp-client,resolvconf,packages,cpan,nfs"
4 | vars:
5 | containerized_node: true
6 | ansible_user: root
7 | cm_user: root
8 | start_rpcbind: false
9 |
--------------------------------------------------------------------------------
/docs/docker-compose/teuthology/teuthology.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
# Teuthology container entrypoint: install the SSH key, register any
# externally-provided test nodes, optionally schedule a small smoke-test
# suite, then run a dispatcher for the chosen machine type.
set -e
# We don't want -x yet, in case the private key is sensitive
if [ -n "$SSH_PRIVKEY_FILE" ]; then
  echo "$SSH_PRIVKEY" > $HOME/.ssh/$SSH_PRIVKEY_FILE
fi
source /teuthology/virtualenv/bin/activate
set -x
if [ -n "$TESTNODES" ]; then
  # TESTNODES is a comma-separated list of externally-managed nodes; add
  # each one to the paddles inventory.
  for node in $(echo $TESTNODES | tr , ' '); do
    teuthology-update-inventory -m $MACHINE_TYPE $node
  done
  CUSTOM_CONF=${CUSTOM_CONF:-}
else
  # Containerized testnodes need the override config baked into the image.
  CUSTOM_CONF=/teuthology/containerized_node.yaml
fi
export MACHINE_TYPE=${MACHINE_TYPE:-testnode}
# Without TEUTHOLOGY_WAIT, schedule one job and exit when the queue drains;
# with it, just run a dispatcher and keep serving the queue.
if [ -z "$TEUTHOLOGY_WAIT" ]; then
  if [ -n "$TEUTH_BRANCH" ]; then
    TEUTH_BRANCH_FLAG="--teuthology-branch $TEUTH_BRANCH"
  fi
  teuthology-suite -v \
      $TEUTH_BRANCH_FLAG \
      --ceph-repo https://github.com/ceph/ceph.git \
      --suite-repo https://github.com/ceph/ceph.git \
      -c main \
      -m $MACHINE_TYPE \
      --limit 1 \
      -n 100 \
      --suite teuthology:no-ceph \
      --filter-out "libcephfs,kclient,stream,centos,rhel" \
      -d ubuntu -D 22.04 \
      --suite-branch main \
      --subset 9000/100000 \
      -p 75 \
      --seed 349 \
      --force-priority \
      $CUSTOM_CONF
  DISPATCHER_EXIT_FLAG='--exit-on-empty-queue'
  # Sanity check: the scheduled job must actually have landed in the queue.
  teuthology-queue -m $MACHINE_TYPE -s | \
      python3 -c "import sys, json; assert json.loads(sys.stdin.read())['count'] > 0, 'queue is empty!'"
fi
teuthology-dispatcher -v \
    --log-dir /teuthology/log \
    --tube $MACHINE_TYPE \
    $DISPATCHER_EXIT_FLAG
47 |
--------------------------------------------------------------------------------
/docs/exporter.rst:
--------------------------------------------------------------------------------
1 | .. _exporter:
2 |
3 | ==================================
4 | The Teuthology Prometheus Exporter
5 | ==================================
6 |
7 | To help make it easier to determine the status of the lab, we've created a
8 | `Prometheus <https://prometheus.io/>`__ exporter (helpfully named
9 | ``teuthology-exporter``). We use `Grafana <https://grafana.com/>`__ to visualize
10 | the data we collect.
11 |
12 | It listens on port 61764, and scrapes every 60 seconds by default.
13 |
14 |
15 | Exposed Metrics
16 | ===============
17 |
18 | .. list-table::
19 |
20 | * - Name
21 | - Type
22 | - Description
23 | - Labels
24 | * - beanstalk_queue_length
25 | - Gauge
26 | - The number of jobs in the beanstalkd queue
27 | - machine type
28 | * - beanstalk_queue_paused
29 | - Gauge
30 | - Whether or not the beanstalkd queue is paused
31 | - machine type
32 | * - teuthology_dispatchers
33 | - Gauge
34 | - The number of running teuthology-dispatcher instances
35 | - machine type
36 | * - teuthology_job_processes
37 | - Gauge
38 | - The number of running job *processes*
39 | -
40 | * - teuthology_job_results_total
41 | - Gauge
42 | - The number of completed jobs
43 | - status (pass/fail/dead)
44 | * - teuthology_nodes
45 | - Gauge
46 | - The number of test nodes
47 | - up, locked
48 | * - teuthology_job_duration_seconds
49 | - Summary
50 | - The time it took to run a job
51 | - suite
52 | * - teuthology_task_duration_seconds
53 | - Summary
54 | - The time it took for each phase of each task to run
55 | - name, phase (enter/exit)
56 | * - teuthology_bootstrap_duration_seconds
57 | - Summary
58 | - The time it took to run teuthology's bootstrap script
59 | -
60 | * - teuthology_node_locking_duration_seconds
61 | - Summary
62 | - The time it took to lock nodes
63 | - machine type, count
64 | * - teuthology_node_reimaging_duration_seconds
65 | - Summary
66 | - The time it took to reimage nodes
67 | - machine type, count
68 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Content Index
2 | =============
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 |
7 | README.rst
8 | intro_testers.rst
9 | fragment_merging.rst
10 | siteconfig.rst
11 | detailed_test_config.rst
12 | openstack_backend.rst
13 | libcloud_backend.rst
14 | downburst_vms.rst
15 | INSTALL.rst
16 | LAB_SETUP.rst
17 | exporter.rst
18 | commands/list.rst
19 | ChangeLog.rst
20 |
21 | Indices and tables
22 | ==================
23 |
24 | * :ref:`genindex`
25 | * :ref:`modindex`
26 | * :ref:`search`
27 |
--------------------------------------------------------------------------------
/docs/laptop/default-pool.xml:
--------------------------------------------------------------------------------
1 |
2 | default
3 |
4 | /var/lib/libvirt/images/default
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/docs/laptop/front.xml:
--------------------------------------------------------------------------------
1 |
2 | front
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/docs/laptop/hosts:
--------------------------------------------------------------------------------
1 |
2 | # teuthology hosts used as downburst vps targets
3 | 192.168.123.100 target-00 target-00.local
4 | 192.168.123.101 target-01 target-01.local
5 | 192.168.123.102 target-02 target-02.local
6 | 192.168.123.103 target-03 target-03.local
7 |
8 |
--------------------------------------------------------------------------------
/docs/laptop/ssh_config:
--------------------------------------------------------------------------------
1 | Host target-*
2 | User ubuntu
3 | StrictHostKeyChecking no
4 | UserKnownHostsFile /dev/null
5 | LogLevel ERROR
6 |
7 |
--------------------------------------------------------------------------------
/docs/laptop/targets.sql:
--------------------------------------------------------------------------------
1 | begin transaction;
2 | insert into nodes (name, machine_type, is_vm, locked, up) values ('localhost', 'libvirt', false, true, true);
3 | insert into nodes (name, machine_type, is_vm, locked, up, mac_address, vm_host_id) values
4 | ('target-00.local', 'vps', true, false, false, '52:54:00:00:00:00', (select id from nodes where name='localhost')),
5 | ('target-01.local', 'vps', true, false, false, '52:54:00:00:00:01', (select id from nodes where name='localhost')),
6 | ('target-02.local', 'vps', true, false, false, '52:54:00:00:00:02', (select id from nodes where name='localhost')),
7 | ('target-03.local', 'vps', true, false, false, '52:54:00:00:00:03', (select id from nodes where name='localhost'));
8 | commit transaction
9 |
10 |
--------------------------------------------------------------------------------
/docs/laptop/teuthology.yaml:
--------------------------------------------------------------------------------
1 | # replace $HOME with whatever appropriate to your needs
2 | # teuthology-lock
3 | lab_domain: local
4 | lock_server: http://localhost:80
5 | default_machine_type: vps
6 | # teuthology-run
7 | results_server: http://localhost:80
8 | # we do not need reserve_machines on localhost
9 | reserve_machines: 0
10 | # point to your teuthology
11 | teuthology_path: $HOME/teuthology
12 | # beanstalkd
13 | queue_host: localhost
14 | queue_port: 11300
15 | # if you want make and test patches to ceph-cm-ansible
16 | # ceph_cm_ansible_git_url: $HOME/ceph-cm-ansible
17 | # customize kvm guests parameter
18 | downburst:
19 | path: $HOME/downburst/virtualenv/bin/downburst
20 | discover_url: http://localhost:8181/images/ibs/
21 | machine:
22 | cpus: 2
23 | disk: 12G
24 | ram: 2G
25 | volumes:
26 | size: 8G
27 | count: 4
28 | check_package_signatures: false
29 | suite_verify_ceph_hash: false
30 |
31 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx >= 5.0.0 # for python 3.10
2 | sphinxcontrib-programoutput
3 | mock == 2.0.0
4 |
--------------------------------------------------------------------------------
/examples/3node_ceph.yaml:
--------------------------------------------------------------------------------
1 | roles:
2 | - [mon.0, mds.0, osd.0]
3 | - [mon.1, osd.1]
4 | - [mon.2, client.0]
5 |
6 | tasks:
7 | - install:
8 | - ceph:
9 | - kclient: [client.0]
10 | - interactive:
11 |
12 | targets:
13 | ubuntu@: ssh-rsa
14 | ubuntu@: ssh-rsa
15 | ubuntu@: ssh-rsa
16 |
--------------------------------------------------------------------------------
/examples/3node_rgw.yaml:
--------------------------------------------------------------------------------
1 | interactive-on-error: true
2 | overrides:
3 | ceph:
4 | branch: main
5 | fs: xfs
6 | roles:
7 | - - mon.a
8 | - mon.c
9 | - osd.0
10 | - - mon.b
11 | - mds.a
12 | - osd.1
13 | - - client.0
14 | tasks:
15 | - install:
16 | - ceph: null
17 | - rgw:
18 | - client.0
19 | - interactive:
20 |
21 | targets:
22 | ubuntu@: ssh-rsa
23 | ubuntu@: ssh-rsa
24 | ubuntu@: ssh-rsa
25 |
--------------------------------------------------------------------------------
/examples/parallel_example.yaml:
--------------------------------------------------------------------------------
1 | interactive-on-error: true
2 | overrides:
3 | roles:
4 | - - test0
5 | - test1
6 | - - test0
7 | - test1
8 | - - test0
9 | tasks:
10 | - install:
11 | - parallel_example:
12 | - test0
13 | - test1
14 |
15 | targets:
16 | ubuntu@: ssh-rsa
17 | ubuntu@: ssh-rsa
18 | ubuntu@: ssh-rsa
19 |
20 |
21 |
--------------------------------------------------------------------------------
/hammer.sh:
--------------------------------------------------------------------------------
#!/bin/sh -ex
#
# simple script to repeat a test until it fails
#
# usage: hammer.sh [-a] <job.yaml> [extra teuthology args...]
#   -a   archive each run's output under <job>.out (removed after each pass)

# Quote "$1" so the comparison is well-formed even when no argument is given
# (unquoted, [ = "-a" ] is a syntax error and -ex aborts confusingly).
if [ "$1" = "-a" ]; then
	shift
	job=$1
	log="--archive $job.out"
else
	job=$1
	log=""
fi

# Fail fast (via -e) if the job file does not exist; quoted for the same
# reason as above.
test -e "$1"

# Set the terminal title to the current pass count.
title() {
	echo '\[\033]0;hammer '$job' '$N' passes\007\]'
}

N=0
title
# Remove any stale archive dir from a previous run before the first pass.
[ -n "$log" ] && [ -d $job.out ] && rm -rf $job.out
# $log intentionally unquoted: it expands to two words (--archive <dir>).
while teuthology $log $job $2 $3 $4
do
	date
	N=$(($N+1))
	echo "$job: $N passes"
	[ -n "$log" ] && rm -rf $job.out
	title
done
echo "$job: $N passes, then failure."
33 |
--------------------------------------------------------------------------------
/openstack-delegate.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run the teuthology/integration suite on OpenStack with a throwaway keypair
# and server named "teuthology-integration".

# Clean up the key file and the OpenStack keypair/server no matter how the
# script exits.
trap "rm -f teuthology-integration.pem ; openstack keypair delete teuthology-integration ; openstack server delete teuthology-integration" EXIT

openstack keypair create teuthology-integration > teuthology-integration.pem
chmod 600 teuthology-integration.pem
teuthology-openstack --name teuthology-integration --key-filename teuthology-integration.pem --key-name teuthology-integration --suite teuthology/integration --wait --teardown --upload
8 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | build-backend = "setuptools.build_meta"
3 | requires = [
4 | "setuptools>=45",
5 | "wheel",
6 | "setuptools_scm>=6.2",
7 | ]
8 |
9 | [tool.setuptools_scm]
10 | version_scheme = "python-simplified-semver"
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | norecursedirs = .git build virtualenv teuthology.egg-info .tox */integration task/tests
3 | log_cli=true
4 | log_level=NOTSET
5 | addopts = -p no:cacheprovider
6 |
--------------------------------------------------------------------------------
/requirements.yml:
--------------------------------------------------------------------------------
1 | ---
2 | collections:
3 | - amazon.aws
4 | - name: ansible.netcommon
5 | version: "<6.0.0" # 6.0 requires ansible-core >= 2.14
6 | - ansible.posix
7 | - name: ansible.utils
8 | version: "<3.0.0" # 3.0 requires ansible-core >= 2.14
9 | - community.docker
10 | - community.general
11 | - community.postgresql
12 |
13 |
--------------------------------------------------------------------------------
/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/scripts/__init__.py
--------------------------------------------------------------------------------
/scripts/dispatcher.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 |
4 | import teuthology.dispatcher.supervisor
5 |
6 | from .supervisor import parse_args as parse_supervisor_args
7 |
8 |
def parse_args(argv):
    """Parse command-line arguments for teuthology-dispatcher.

    :param argv: list of argument strings, typically sys.argv[1:]
    :returns: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description="Start a dispatcher for the specified tube. Grab jobs from a beanstalk queue and run the teuthology tests they describe as subprocesses. The subprocess invoked is teuthology-supervisor."
    )
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="be more verbose")
    parser.add_argument("-a", "--archive-dir", type=str,
                        help="path to archive results in")
    parser.add_argument("-t", "--tube", type=str, required=True,
                        help="which beanstalk tube to read jobs from")
    parser.add_argument("-l", "--log-dir", type=str, required=True,
                        help="path in which to store the dispatcher log")
    parser.add_argument("--exit-on-empty-queue", action="store_true",
                        help="if the queue is empty, exit")
    return parser.parse_args(argv)
45 |
46 |
def main():
    """Console entry point for teuthology-dispatcher.

    Forwards to the supervisor when invoked with --supervisor (legacy
    compatibility); otherwise runs the dispatcher proper. Exits with the
    callee's return code either way.
    """
    if "--supervisor" in sys.argv:
        # This is for transitional compatibility, so the old dispatcher can
        # invoke the new supervisor. Once old dispatchers are phased out,
        # this block can be as well.
        sys.argv.remove("--supervisor")
        sys.argv[0] = "teuthology-supervisor"
        sys.exit(teuthology.dispatcher.supervisor.main(
            parse_supervisor_args(sys.argv[1:])
        ))
    else:
        sys.exit(teuthology.dispatcher.main(parse_args(sys.argv[1:])))
59 |
60 |
61 | if __name__ == "__main__":
62 | main()
63 |
--------------------------------------------------------------------------------
/scripts/exporter.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.exporter
4 |
5 | doc = """
6 | usage: teuthology-exporter --help
7 | teuthology-exporter [--interval INTERVAL]
8 |
9 | optional arguments:
10 | -h, --help show this help message and exit
11 | --interval INTERVAL update metrics this often, in seconds
12 | [default: 60]
13 | """
14 |
15 |
def main():
    """Parse CLI options with docopt and start the teuthology exporter."""
    cli_args = docopt.docopt(doc)
    teuthology.exporter.main(cli_args)
19 |
--------------------------------------------------------------------------------
/scripts/kill.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.config
4 | import teuthology.kill
5 |
6 | doc = """
7 | usage: teuthology-kill -h
8 | teuthology-kill [-a ARCHIVE] [-p] -r RUN
9 | teuthology-kill [-a ARCHIVE] [-p] -m MACHINE_TYPE -r RUN
10 | teuthology-kill [-a ARCHIVE] [-o OWNER] -r RUN -j JOB ...
11 | teuthology-kill [-a ARCHIVE] [-o OWNER] -J JOBSPEC
12 | teuthology-kill [-p] -o OWNER -m MACHINE_TYPE -r RUN
13 |
14 | Kill running teuthology jobs:
15 | 1. Removes any queued jobs from the beanstalk queue
16 | 2. Kills any running jobs
17 | 3. Nukes any machines involved
18 |
19 | NOTE: Must be run on the same machine that is executing the teuthology job
20 | processes.
21 |
22 | optional arguments:
23 | -h, --help show this help message and exit
24 | -a ARCHIVE, --archive ARCHIVE
25 | The base archive directory
26 | [default: {archive_base}]
27 | -p, --preserve-queue Preserve the queue - do not delete queued jobs
28 | -r, --run RUN The name(s) of the run(s) to kill
29 | -j, --job JOB The job_id of the job to kill
30 | -J, --jobspec JOBSPEC
31 | The 'jobspec' of the job to kill. A jobspec consists of
32 | both the name of the run and the job_id, separated by a
33 | '/'. e.g. 'my-test-run/1234'
34 | -o, --owner OWNER The owner of the job(s)
35 | -m, --machine-type MACHINE_TYPE
36 | The type of machine the job(s) are running on.
37 | This is required if killing a job that is still
38 | entirely in the queue.
39 | """.format(archive_base=teuthology.config.config.archive_base)
40 |
41 |
def main():
    """Parse CLI options with docopt and dispatch to teuthology.kill."""
    teuthology.kill.main(docopt.docopt(doc))
45 |
--------------------------------------------------------------------------------
/scripts/ls.py:
--------------------------------------------------------------------------------
1 | """
2 | usage: teuthology-ls [-h] [-v]
3 |
4 | List teuthology job results
5 |
6 | positional arguments:
7 | path under which to archive results
8 |
9 | optional arguments:
10 | -h, --help show this help message and exit
11 | -v, --verbose show reasons tests failed
12 | """
13 | import docopt
14 | import teuthology.ls
15 |
16 |
def main():
    """Parse CLI options and list teuthology job results from the archive."""
    teuthology.ls.main(docopt.docopt(__doc__))
20 |
--------------------------------------------------------------------------------
/scripts/node_cleanup.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import sys
4 |
5 | import teuthology
6 | from teuthology.config import config
7 | from teuthology.lock import query, ops
8 |
9 |
def main():
    """Find and unlock nodes locked by jobs that are no longer active."""
    args = parse_args(sys.argv[1:])
    if args.verbose:
        teuthology.log.setLevel(logging.DEBUG)
    else:
        # A level above CRITICAL: suppress everything except our own output
        teuthology.log.setLevel(100)
    log = logging.getLogger(__name__)
    logger = logging.getLogger()
    for handler in logger.handlers:
        # Strip timestamps/levels so output is easy to read and pipe
        handler.setFormatter(
            logging.Formatter('%(message)s')
        )
    try:
        stale = query.find_stale_locks(args.owner)
    except Exception:
        # Fixed log grammar: was "Error while check for stale locks..."
        log.exception(f"Error while checking for stale locks held by {args.owner}")
        return
    if not stale:
        return
    # Group the stale nodes by their actual lock owner
    by_owner = {}
    for node in stale:
        if args.owner and node['locked_by'] != args.owner:
            log.warning(
                f"Node {node['name']} expected to be locked by {args.owner} "
                f"but found {node['locked_by']} instead"
            )
            continue
        by_owner.setdefault(node['locked_by'], []).append(node)
    if args.dry_run:
        log.info("Would attempt to unlock:")
        for owner, nodes in by_owner.items():
            for node in nodes:
                node_job = node['description'].replace(
                    config.archive_base, config.results_ui_server)
                log.info(f"{node['name']}\t{node_job}")
    else:
        unlocked = 0
        for owner, nodes in by_owner.items():
            ops.unlock_safe([node["name"] for node in nodes], owner)
            unlocked += len(nodes)
        # Count only nodes actually submitted for unlocking; len(stale) would
        # overcount when owner-mismatched nodes were skipped above.
        log.info(f"unlocked {unlocked} nodes")
49 |
def parse_args(argv):
    """Parse command-line arguments for the node-cleanup tool.

    :param argv: list of argument strings, typically sys.argv[1:]
    :returns: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description="Find and unlock nodes that are still locked by jobs that are no "
                    "longer active",
    )
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='Be more verbose')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help="List nodes that would be unlocked if the flag were omitted")
    parser.add_argument('--owner',
                        help='Optionally, find nodes locked by a specific user')
    return parser.parse_args(argv)
72 |
73 | if __name__ == "__main__":
74 | main()
75 |
--------------------------------------------------------------------------------
/scripts/prune_logs.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.config
4 | import teuthology.prune
5 |
6 | doc = """
7 | usage:
8 | teuthology-prune-logs -h
9 | teuthology-prune-logs [-v] [options]
10 |
11 | Prune old logfiles from the archive
12 |
13 | optional arguments:
14 | -h, --help Show this help message and exit
15 | -v, --verbose Be more verbose
16 | -a ARCHIVE, --archive ARCHIVE
17 | The base archive directory
18 | [default: {archive_base}]
19 | --dry-run Don't actually delete anything; just log what would be
20 | deleted
21 | -p DAYS, --pass DAYS Remove all logs for jobs which passed and are older
22 | than DAYS. Negative values will skip this operation.
23 | [default: 14]
24 | -f DAYS, --fail DAYS Like --pass, but for failed jobs. [default: -1]
25 | -r DAYS, --remotes DAYS
26 | Remove the 'remote' subdir of jobs older than DAYS.
27 | Negative values will skip this operation.
28 | [default: 60]
29 | -z DAYS, --compress DAYS
30 | Compress (using gzip) any teuthology.log files older
31 | than DAYS. Negative values will skip this operation.
32 | [default: 30]
33 | """.format(archive_base=teuthology.config.config.archive_base)
34 |
35 |
def main():
    """Parse CLI options with docopt and run the log pruner."""
    teuthology.prune.main(docopt.docopt(doc))
39 |
--------------------------------------------------------------------------------
/scripts/queue.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology.config
4 | import teuthology.beanstalk
5 |
6 | doc = """
7 | usage: teuthology-queue -h
8 | teuthology-queue [-s|-d|-f] -m MACHINE_TYPE
9 | teuthology-queue [-r] -m MACHINE_TYPE
10 | teuthology-queue -m MACHINE_TYPE -D PATTERN
11 | teuthology-queue -p SECONDS [-m MACHINE_TYPE]
12 |
13 | List Jobs in queue.
14 | If -D is passed, then jobs with PATTERN in the job name are deleted from the
15 | queue.
16 |
17 | Arguments:
18 | -m, --machine_type MACHINE_TYPE [default: multi]
19 | Which machine type queue to work on.
20 |
21 | optional arguments:
22 | -h, --help Show this help message and exit
23 | -D, --delete PATTERN Delete Jobs with PATTERN in their name
24 | -d, --description Show job descriptions
25 | -r, --runs Only show run names
26 | -f, --full Print the entire job config. Use with caution.
27 | -s, --status Prints the status of the queue
28 | -p, --pause SECONDS Pause queues for a number of seconds. A value of 0
29 | will unpause. If -m is passed, pause that queue,
30 | otherwise pause all queues.
31 | """
32 |
33 |
def main():
    """Parse CLI options with docopt and run the queue tool."""
    teuthology.beanstalk.main(docopt.docopt(doc))
37 |
--------------------------------------------------------------------------------
/scripts/reimage.py:
--------------------------------------------------------------------------------
1 | import docopt
2 | import sys
3 |
4 | import teuthology.reimage
5 |
6 | doc = """
7 | usage: teuthology-reimage --help
8 | teuthology-reimage --os-type distro --os-version version [options] ...
9 |
10 | Reimage nodes without locking using specified distro type and version.
11 | The nodes must be locked by the current user, otherwise an error occurs.
12 | Custom owner can be specified in order to provision someone else nodes.
13 | Reimaging unlocked nodes cannot be provided.
14 |
15 | Standard arguments:
16 | -h, --help Show this help message and exit
17 | -v, --verbose Be more verbose
18 | --os-type Distro type like: rhel, ubuntu, etc.
19 | --os-version Distro version like: 7.6, 16.04, etc.
20 | --owner user@host Owner of the locked machines
21 | """
22 |
def main(argv=None):
    """Console entry point for teuthology-reimage.

    :param argv: CLI argument list; defaults to sys.argv[1:], resolved at
        call time.
    :returns: whatever teuthology.reimage.main returns
    """
    # The previous default ``argv=sys.argv[1:]`` was evaluated once at import
    # time, so later changes to sys.argv were silently ignored; resolve lazily.
    if argv is None:
        argv = sys.argv[1:]
    args = docopt.docopt(doc, argv=argv)
    return teuthology.reimage.main(args)
26 |
--------------------------------------------------------------------------------
/scripts/report.py:
--------------------------------------------------------------------------------
import docopt

# teuthology.config must be imported explicitly: the doc string below reads
# teuthology.config.config.archive_base at import time, and importing only
# teuthology.report does not guarantee the config submodule is loaded.
import teuthology.config
import teuthology.report

doc = """
usage:
    teuthology-report -h
    teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] [-D] -r RUN ...
    teuthology-report [-v] [-s SERVER] [-a ARCHIVE] [-D] -r RUN -j JOB ...
    teuthology-report [-v] [-R] [-n] [-s SERVER] [-a ARCHIVE] --all-runs

Submit test results to a web service

optional arguments:
  -h, --help            show this help message and exit
  -a ARCHIVE, --archive ARCHIVE
                        The base archive directory
                        [default: {archive_base}]
  -r [RUN ...], --run [RUN ...]
                        A run (or list of runs) to submit
  -j [JOB ...], --job [JOB ...]
                        A job (or list of jobs) to submit
  --all-runs            Submit all runs in the archive
  -R, --refresh         Re-push any runs already stored on the server. Note
                        that this may be slow.
  -s SERVER, --server SERVER
                        The server to post results to, e.g.
                        http://localhost:8080/ . May also be specified in
                        ~/.teuthology.yaml as 'results_server'
  -n, --no-save         By default, when submitting all runs, we remember the
                        last successful submission in a file called
                        'last_successful_run'. Pass this flag to disable that
                        behavior.
  -D, --dead            Mark all given jobs (or entire runs) with status
                        'dead'. Implies --refresh.
  -v, --verbose         be more verbose
""".format(archive_base=teuthology.config.config.archive_base)


def main():
    """Parse CLI options with docopt and submit results via teuthology.report."""
    args = docopt.docopt(doc)
    teuthology.report.main(args)
43 |
--------------------------------------------------------------------------------
/scripts/results.py:
--------------------------------------------------------------------------------
1 | """
2 | usage: teuthology-results [-h] [-v] [--dry-run] [--email EMAIL] [--timeout TIMEOUT] --archive-dir DIR --name NAME [--subset SUBSET] [--seed SEED] [--no-nested-subset]
3 |
4 | Email teuthology suite results
5 |
6 | optional arguments:
7 | -h, --help show this help message and exit
8 | -v, --verbose be more verbose
9 | --dry-run Instead of sending the email, just print it
10 | --email EMAIL address to email test failures to
11 | --timeout TIMEOUT how many seconds to wait for all tests to finish
12 | [default: 0]
13 | --archive-dir DIR path under which results for the suite are stored
14 | --name NAME name of the suite
15 | --subset SUBSET subset passed to teuthology-suite
16 | --seed SEED random seed used in teuthology-suite
17 | --no-nested-subset disable nested subsets used in teuthology-suite
18 | """
19 | import docopt
20 | import teuthology.results
21 |
22 |
def main():
    """Parse CLI options and email the suite results."""
    teuthology.results.main(docopt.docopt(__doc__))
26 |
--------------------------------------------------------------------------------
/scripts/run.py:
--------------------------------------------------------------------------------
1 | """
2 | usage: teuthology --help
3 | teuthology --version
4 | teuthology [options] [--] ...
5 |
6 | Run ceph integration tests
7 |
8 | positional arguments:
9 | one or more config files to read
10 |
11 | optional arguments:
12 | -h, --help show this help message and exit
13 | -v, --verbose be more verbose
14 | --version the current installed version of teuthology
15 | -a DIR, --archive DIR path to archive results in
16 | --description DESCRIPTION job description
17 | --owner OWNER job owner
18 | --lock lock machines for the duration of the run
19 | --machine-type MACHINE_TYPE Type of machine to lock/run tests on.
20 | --os-type OS_TYPE Distro/OS of machine to run test on.
21 | --os-version OS_VERSION Distro/OS version of machine to run test on.
22 | --block block until locking machines succeeds (use with --lock)
23 | --name NAME name for this teuthology run
24 | --suite-path SUITE_PATH Location of ceph-qa-suite on disk. If not specified,
25 | it will be fetched
26 | --interactive-on-error drop to a python shell on failure, which will
27 | halt the job; developer can then ssh to targets
28 | and examine cluster state.
29 |
30 | """
31 | import docopt
32 |
33 | import teuthology.run
34 |
35 |
def main():
    """Parse CLI options and run the teuthology integration-test runner."""
    parsed = docopt.docopt(__doc__, version=teuthology.__version__)
    teuthology.run.main(parsed)
39 |
--------------------------------------------------------------------------------
/scripts/supervisor.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 |
4 | import teuthology.dispatcher.supervisor
5 |
6 |
def parse_args(argv):
    """Parse command-line arguments for teuthology-supervisor.

    :param argv: list of argument strings, typically sys.argv[1:]
    :returns: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description="Supervise and run a teuthology job; normally only run by the dispatcher",
    )
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="be more verbose")
    parser.add_argument("-a", "--archive-dir", type=str, required=True,
                        help="path in which to store the job's logfiles")
    parser.add_argument("--bin-path", type=str, required=True,
                        help="teuthology bin path")
    parser.add_argument("--job-config", type=str, required=True,
                        help="file descriptor of job's config file")
    return parser.parse_args(argv)
37 |
38 |
def main():
    # Exit with the supervisor's return code so the dispatcher/systemd can
    # observe failures.
    sys.exit(teuthology.dispatcher.supervisor.main(parse_args(sys.argv[1:])))
42 |
43 | if __name__ == "__main__":
44 | main()
45 |
--------------------------------------------------------------------------------
/scripts/test/script.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | from pytest import raises
3 |
4 |
class Script(object):
    """Base class for smoke tests of teuthology console scripts.

    Subclasses override ``script_name``; the tests shell out to the installed
    script and check its basic CLI behavior.
    """
    script_name = 'teuthology'

    def test_help(self):
        # --help must succeed and print a usage message
        output = subprocess.check_output((self.script_name, '--help')).decode()
        assert output.startswith('usage')

    def test_invalid(self):
        # An unknown option must produce a nonzero exit status
        with raises(subprocess.CalledProcessError):
            subprocess.check_call((self.script_name, '--invalid-option'))
17 |
--------------------------------------------------------------------------------
/scripts/test/test_dispatcher_.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestDispatcher(Script):
    """Smoke tests for the teuthology-dispatcher console script."""
    script_name = 'teuthology-dispatcher'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_exporter_.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestExporter(Script):
    """Smoke tests for the teuthology-exporter console script."""
    script_name = 'teuthology-exporter'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_lock.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestLock(Script):
    """Smoke tests for the teuthology-lock console script."""
    script_name = 'teuthology-lock'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_ls.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | from script import Script
4 | from scripts import ls
5 |
6 | doc = ls.__doc__
7 |
8 |
class TestLs(Script):
    """Smoke and argument-parsing tests for the teuthology-ls console script."""
    script_name = 'teuthology-ls'

    def test_args(self):
        # docopt stores the bare positional archive path under the "" key
        args = docopt.docopt(doc, ["--verbose", "some/archive/dir"])
        assert args["--verbose"]
        assert args[""] == "some/archive/dir"
16 |
--------------------------------------------------------------------------------
/scripts/test/test_prune_logs.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestPruneLogs(Script):
    """Smoke tests for the teuthology-prune-logs console script."""
    script_name = 'teuthology-prune-logs'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_report.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestReport(Script):
    """Smoke tests for the teuthology-report console script."""
    script_name = 'teuthology-report'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_results.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestResults(Script):
    """Smoke tests for the teuthology-results console script."""
    script_name = 'teuthology-results'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_run.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | from script import Script
4 | from scripts import run
5 |
6 | doc = run.__doc__
7 |
8 |
class TestRun(Script):
    """Smoke and argument-parsing tests for the main teuthology script."""
    script_name = 'teuthology'

    def test_all_args(self):
        # Exercise every supported option at once, plus one config file
        args = docopt.docopt(doc, [
            "--verbose",
            "--archive", "some/archive/dir",
            "--description", "the_description",
            "--owner", "the_owner",
            "--lock",
            "--machine-type", "machine_type",
            "--os-type", "os_type",
            "--os-version", "os_version",
            "--block",
            "--name", "the_name",
            "--suite-path", "some/suite/dir",
            "path/to/config.yml",
        ])
        assert args["--verbose"]
        assert args["--archive"] == "some/archive/dir"
        assert args["--description"] == "the_description"
        assert args["--owner"] == "the_owner"
        assert args["--lock"]
        assert args["--machine-type"] == "machine_type"
        assert args["--os-type"] == "os_type"
        assert args["--os-version"] == "os_version"
        assert args["--block"]
        assert args["--name"] == "the_name"
        assert args["--suite-path"] == "some/suite/dir"
        # Bare positionals (config files) accumulate under the "" key
        assert args[""] == ["path/to/config.yml"]

    def test_multiple_configs(self):
        # Multiple config files are collected in order
        args = docopt.docopt(doc, [
            "config1.yml",
            "config2.yml",
        ])
        assert args[""] == ["config1.yml", "config2.yml"]
46 |
--------------------------------------------------------------------------------
/scripts/test/test_schedule.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestSchedule(Script):
    """Smoke tests for the teuthology-schedule console script."""
    script_name = 'teuthology-schedule'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_suite.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestSuite(Script):
    """Smoke tests for the teuthology-suite console script."""
    script_name = 'teuthology-suite'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_supervisor_.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 |
3 |
class TestSupervisor(Script):
    """Smoke tests for the teuthology-supervisor console script."""
    script_name = 'teuthology-supervisor'
6 |
--------------------------------------------------------------------------------
/scripts/test/test_updatekeys.py:
--------------------------------------------------------------------------------
1 | from script import Script
2 | import subprocess
3 | from pytest import raises
4 | from pytest import skip
5 |
6 |
class TestUpdatekeys(Script):
    """Smoke tests for the teuthology-updatekeys console script."""
    script_name = 'teuthology-updatekeys'

    def test_invalid(self):
        # Overrides Script.test_invalid, which cannot pass yet.
        # Fixed the skip message: a missing space ran "allow" and
        # "teuthology-updatekeys" together, and "erorr" was a typo.
        skip("teuthology.lock needs to be partially refactored to allow "
             "teuthology-updatekeys to return nonzero in all error cases")

    def test_all_and_targets(self):
        # -a (all nodes) and -t (explicit targets) are mutually exclusive
        args = (self.script_name, '-a', '-t', 'foo')
        with raises(subprocess.CalledProcessError):
            subprocess.check_call(args)

    def test_no_args(self):
        # Running with no arguments at all must fail
        with raises(subprocess.CalledProcessError):
            subprocess.check_call(self.script_name)
22 |
--------------------------------------------------------------------------------
/scripts/update_inventory.py:
--------------------------------------------------------------------------------
1 | import docopt
2 |
3 | import teuthology
4 | import teuthology.lock
5 | import teuthology.lock.ops
6 | import teuthology.misc
7 | import teuthology.orchestra.remote
8 |
9 | import logging
10 |
11 | doc = """
12 | usage: teuthology-update-inventory -h
13 | teuthology-update-inventory [-v] [-m type] REMOTE [REMOTE ...]
14 |
15 | Update the given nodes' inventory information on the lock server
16 |
17 |
18 | -h, --help show this help message and exit
19 | -v, --verbose be more verbose
20 | -m , --machine-type optionally specify a machine type when
21 | submitting nodes for the first time
22 | REMOTE hostnames of machines whose information to update
23 |
24 | """
25 |
26 |
def main():
    """Update the lock server's inventory records for the given remotes.

    Connects to each REMOTE over SSH, collects its inventory info, and
    submits it to the lock server; --machine-type overrides the recorded
    type (useful when submitting nodes for the first time).
    """
    args = docopt.docopt(doc)
    if args['--verbose']:
        teuthology.log.setLevel(logging.DEBUG)

    machine_type = args.get('--machine-type')
    remotes = args.get('REMOTE')
    for rem_name in remotes:
        # Normalize e.g. bare hostnames into the canonical user@fqdn form
        rem_name = teuthology.misc.canonicalize_hostname(rem_name)
        remote = teuthology.orchestra.remote.Remote(rem_name)
        remote.connect()
        inventory_info = remote.inventory_info
        if machine_type:
            inventory_info['machine_type'] = machine_type
        teuthology.lock.ops.update_inventory(inventory_info)
42 |
--------------------------------------------------------------------------------
/scripts/updatekeys.py:
--------------------------------------------------------------------------------
1 | import docopt
2 | import sys
3 |
4 | import teuthology.lock
5 | import teuthology.lock.cli
6 |
7 | doc = """
8 | usage: teuthology-updatekeys -h
9 | teuthology-updatekeys [-v] -t
10 | teuthology-updatekeys [-v] ...
11 | teuthology-updatekeys [-v] -a
12 |
13 | Update any hostkeys that have changed. You can list specific machines to run
14 | on, or use -a to check all of them automatically.
15 |
16 | positional arguments:
17 | MACHINES hosts to check for updated keys
18 |
19 | optional arguments:
20 | -h, --help Show this help message and exit
21 | -v, --verbose Be more verbose
22 | -t , --targets
23 | Input yaml containing targets to check
24 | -a, --all Update hostkeys of all machines in the db
25 | """
26 |
27 |
def main():
    """Run the updatekeys CLI and exit with its returned status."""
    parsed = docopt.docopt(doc)
    sys.exit(teuthology.lock.cli.updatekeys(parsed))
32 |
--------------------------------------------------------------------------------
/scripts/wait.py:
--------------------------------------------------------------------------------
1 | import docopt
2 | import sys
3 |
4 | import logging
5 |
6 | import teuthology
7 | import teuthology.suite
8 | from teuthology.config import config
9 |
10 | doc = """
11 | usage: teuthology-wait --help
12 | teuthology-wait [-v] --run
13 |
14 | Wait until run is finished. Returns exit code 0 on success, otherwise 1.
15 |
16 | Miscellaneous arguments:
17 | -h, --help Show this help message and exit
18 | -v, --verbose Be more verbose
19 |
20 | Standard arguments:
21 | -r, --run Run name to watch.
22 | """
23 |
24 |
def main(argv=None):
    """Console entry point for teuthology-wait.

    :param argv: CLI argument list; defaults to sys.argv[1:], resolved at
        call time.
    :returns: 0 on success, otherwise nonzero (from teuthology.suite.wait)
    """
    # The previous default ``argv=sys.argv[1:]`` was evaluated once at import
    # time, so later changes to sys.argv were silently ignored; resolve lazily.
    if argv is None:
        argv = sys.argv[1:]
    args = docopt.docopt(doc, argv=argv)
    if args.get('--verbose'):
        teuthology.log.setLevel(logging.DEBUG)
    name = args.get('--run')
    return teuthology.suite.wait(name, config.max_job_time, None)
31 |
32 |
--------------------------------------------------------------------------------
/systemd/teuthology-dispatcher@.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Teuthology Dispatcher
3 |
4 | Wants=ceph.target
5 | After=ceph.target
6 |
7 | [Service]
8 | Type=simple
9 | User=teuthworker
10 | ExecStart=/home/teuthworker/src/git.ceph.com_git_teuthology_main/virtualenv/bin/python3 \
11 | /home/teuthworker/src/git.ceph.com_git_teuthology_main/virtualenv/bin/teuthology-dispatcher \
12 | -v \
13 | --archive-dir /home/teuthworker/archive \
14 | --tube %i \
15 | --log-dir /home/teuthworker/archive/worker_logs
16 | ExecStop=touch /tmp/teuthology-stop-dispatcher
17 | Restart=on-failure
18 | TimeoutStopSec=infinity
19 |
--------------------------------------------------------------------------------
/systemd/teuthology-exporter.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Teuthology Exporter
3 |
4 | Wants=ceph.target
5 | After=ceph.target
6 |
7 | [Service]
8 | Type=simple
9 | User=teuthworker
10 | ExecStart=/home/teuthworker/src/git.ceph.com_git_teuthology_main/virtualenv/bin/teuthology-exporter
11 | Restart=on-failure
12 | TimeoutStopSec=60
13 |
--------------------------------------------------------------------------------
/teuthology/exit.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import signal
4 |
5 |
6 | log = logging.getLogger(__name__)
7 |
8 |
class Exiter(object):
    """
    A helper to manage any signal handlers we need to call upon receiving a
    given signal
    """
    def __init__(self):
        # Handler objects, invoked in registration order by default_handler
        self.handlers = list()

    def add_handler(self, signals, func):
        """
        Adds a handler function to be called when any of the given signals are
        received.

        The handler function should have a signature like::

            my_handler(signal, frame)
        """
        # Accept a single signal number as well as an iterable of them
        if isinstance(signals, int):
            signals = [signals]

        # Route all listed signals through our shared dispatcher
        for signal_ in signals:
            signal.signal(signal_, self.default_handler)

        handler = Handler(self, func, signals)
        log.debug(
            "Installing handler: %s",
            repr(handler),
        )
        self.handlers.append(handler)
        return handler

    def default_handler(self, signal_, frame):
        # Run every registered handler, then let the process die from the
        # signal's default disposition so exit status is reported correctly.
        log.debug(
            "Got signal %s; running %s handler%s...",
            signal_,
            len(self.handlers),
            '' if len(self.handlers) == 1 else 's',
        )
        for handler in self.handlers:
            handler.func(signal_, frame)
        log.debug("Finished running handlers")
        # Restore the default handler (0 == signal.SIG_DFL)
        signal.signal(signal_, 0)
        # Re-send the signal to our main process
        os.kill(os.getpid(), signal_)
54 |
55 |
class Handler(object):
    """A single registered signal-handler callback, owned by an Exiter."""

    def __init__(self, exiter, func, signals):
        self.exiter = exiter    # the Exiter that dispatches to this handler
        self.func = func        # called as func(signal, frame)
        self.signals = signals  # signal numbers this handler covers

    def remove(self):
        """Deregister this handler; a no-op if it was already removed."""
        try:
            log.debug("Removing handler: %s", self)
            self.exiter.handlers.remove(self)
        except ValueError:
            # Not in the exiter's list anymore; nothing to do
            pass

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(exiter={self.exiter}, "
            f"func={self.func}, signals={self.signals})"
        )
76 |
77 |
78 | exiter = Exiter()
79 |
--------------------------------------------------------------------------------
/teuthology/job_status.py:
--------------------------------------------------------------------------------
def get_status(summary):
    """
    :param summary: The job summary dict. Normally ctx.summary
    :returns: A status string like 'pass', 'fail', or 'dead'
    """
    # An explicit status wins over the legacy boolean 'success' field
    explicit = summary.get('status')
    if explicit is not None:
        return explicit

    # Identity checks deliberately require real booleans, not truthy values
    success = summary.get('success')
    if success is True:
        return 'pass'
    if success is False:
        return 'fail'
    return None
18 |
19 |
def set_status(summary, status):
    """
    Sets summary['status'] to status, and summary['success'] to True if status
    is 'pass'. If status is not 'pass', then 'success' is False.

    If status is None, do nothing.

    :param summary: The job summary dict. Normally ctx.summary
    :param status: The job status, e.g. 'pass', 'fail', 'dead'
    """
    if status is None:
        return

    summary['status'] = status
    # 'success' is simply whether the status string is 'pass'
    summary['success'] = status == 'pass'
38 |
39 |
--------------------------------------------------------------------------------
/teuthology/lock/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/lock/__init__.py
--------------------------------------------------------------------------------
/teuthology/lock/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/lock/test/__init__.py
--------------------------------------------------------------------------------
/teuthology/lock/test/test_lock.py:
--------------------------------------------------------------------------------
1 | import teuthology.lock.util
2 |
class TestLock(object):
    """Unit tests for teuthology.lock.util helpers."""

    def test_locked_since_seconds(self):
        # A fixed 2013 timestamp is far more than an hour in the past
        node = { "locked_since": "2013-02-07 19:33:55.000000" }
        assert teuthology.lock.util.locked_since_seconds(node) > 3600
8 |
--------------------------------------------------------------------------------
/teuthology/ls.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 | import yaml
5 | import errno
6 | import re
7 |
8 | from teuthology.job_status import get_status
9 |
10 |
def main(args):
    """
    Entry point for the ``teuthology-ls`` command.

    :param args: docopt argument dict; uses the ``<archive_dir>``
                 positional argument and the ``--verbose`` flag.
    :returns: the return value of :func:`ls` (normally None)
    """
    # Fix: the archive directory lives under the docopt key
    # '<archive_dir>'; the empty-string key "" can never exist in a
    # docopt result and would raise KeyError on every invocation.
    return ls(args["<archive_dir>"], args["--verbose"])
13 |
14 |
def ls(archive_dir, verbose):
    """
    Print a one-line summary for every job directory under ``archive_dir``.

    Each job's summary.yaml is merged (document by document) into a dict;
    jobs without a summary.yaml get their debug info printed instead.

    :param archive_dir: path containing numeric job directories
    :param verbose: if truthy, also print each job's failure_reason
    """
    for job in get_jobs(archive_dir):
        job_dir = os.path.join(archive_dir, job)
        summary = {}
        try:
            with open(os.path.join(job_dir, 'summary.yaml')) as stream:
                # summary.yaml may hold several YAML documents; later ones
                # override earlier keys.
                for doc in yaml.safe_load_all(stream):
                    summary.update(doc)
        except IOError as err:
            # Missing summary.yaml means the job never finished; anything
            # else is a real error.
            if err.errno != errno.ENOENT:
                raise
            print_debug_info(job, job_dir, archive_dir)
            continue

        print("{job} {status} {owner} {desc} {duration}s".format(
            job=job,
            owner=summary.get('owner', '-'),
            desc=summary.get('description', '-'),
            status=get_status(summary),
            duration=int(summary.get('duration', 0)),
        ))
        if verbose and 'failure_reason' in summary:
            print(' {reason}'.format(reason=summary['failure_reason']))
40 |
41 |
def get_jobs(archive_dir):
    """
    List the job directories directly under ``archive_dir``.

    A job directory is any immediate subdirectory whose name consists
    entirely of digits. Names are returned sorted lexicographically.

    :param archive_dir: directory to scan
    :returns: sorted list of job directory names (strings)
    """
    def _is_job_dir(name):
        full_path = os.path.join(archive_dir, name)
        return os.path.isdir(full_path) and re.match(r'\d+$', name) is not None

    return sorted(entry for entry in os.listdir(archive_dir)
                  if _is_job_dir(entry))
53 |
54 |
def print_debug_info(job, job_dir, archive_dir):
    """
    Print the job id followed by the last line of its teuthology.log.

    Used for jobs that have no summary.yaml. If the log does not exist,
    only the job id is printed. ``job_dir`` is accepted for interface
    compatibility but the path is rebuilt from ``archive_dir`` and ``job``.
    """
    print('%s ' % job, end='')

    try:
        log_path = os.path.join(archive_dir, job, 'teuthology.log')
        if os.path.exists(log_path):
            # Shell out to tail(1) for the final log line.
            last_line = os.popen(
                'tail -1 %s' % log_path
            ).read().rstrip()
            print(last_line, end='')
        else:
            print('', end='')
    except IOError:
        # Best effort only; a broken log must not abort the listing.
        pass
    print('')
70 |
--------------------------------------------------------------------------------
/teuthology/nuke/__init__.py:
--------------------------------------------------------------------------------
import logging

log = logging.getLogger(__name__)


# This is being kept because ceph.git/qa/tasks/cephfs/filesystem.py references it.
def clear_firewall(ctx):
    """
    Drop every iptables rule that teuthology created. Teuthology tags its
    rules with a comment containing 'teuthology'; rules without that tag
    are left alone.
    """
    log.info("Clearing teuthology firewall rules...")
    # Re-load the ruleset minus every teuthology-tagged line.
    command = [
        "sudo", "sh", "-c",
        "iptables-save | grep -v teuthology | iptables-restore"
    ]
    ctx.cluster.run(args=command)
    log.info("Cleared teuthology firewall rules.")
21 |
--------------------------------------------------------------------------------
/teuthology/openstack/archive-key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEowIBAAKCAQEAvLz+sao32JL/yMgwTFDTnQVZK3jyXlhQJpHLsgwgHWHQ/27L
3 | fwEbGFVYsJNBGntZwCZvH/K4c0IevbnX/Y69qgmAc9ZpZQLIcIF0A8hmwVYRU+Ap
4 | TAK2qAvadThWfiRBA6+SGoRy6VV5MWeq+hqlGf9axRKqhECNhHuGBuBeosUOZOOH
5 | NVzvFIbp/4842yYrZUDnDzW7JX2kYGi6kaEAYeR8qYJgT/95Pm4Bgu1V7MI36rx1
6 | O/5BSPF3LvDSnnaZyHCDZtwzC50lBnS2nx8kKPmmdKBSEJoTdNRPIXZ/lMq5pzIW
7 | QPDjI8O5pbX1BJcxfFlZ/h+bI6u8IX3vfTGHWwIDAQABAoIBAG5yLp0rHfkXtKT7
8 | OQA/wEW/znmZEkPRbD3VzZyIafanuhTv8heFPyTTNM5Hra5ghpniI99PO07/X1vp
9 | OBMCB81MOCYRT6WzpjXoG0rnZ/I1enhZ0fDQGbFnFlTIPh0c/Aq7IEVyQoh24y/d
10 | GXm4Q+tdufFfRfeUivv/CORXQin/Iugbklj8erjx+fdVKPUXilmDIEVleUncer5/
11 | K5Fxy0lWbm6ZX1fE+rfJvCwNjAaIJgrN8TWUTE8G72F9Y0YU9hRtqOZe6MMbSufy
12 | 5+/yj2Vgp+B8Id7Ass2ylDQKsjBett/M2bNKt/DUVIiaxKi0usNSerLvtbkWEw9s
13 | tgUI6ukCgYEA6qqnZwkbgV0lpj1MrQ3BRnFxNR42z2MyEY5xRGaYp22ByxS207z8
14 | mM3EuLH8k2u6jzsGoPpBWhBbs97MuGDHwsMEO5rBpytnTE4Hxrgec/13Arzk4Bme
15 | eqg1Ji+lNkoLzEHkuihskcZwnQ8uaOdqrnH/NRGuUhA9hjeh+lQzBy8CgYEAzeV1
16 | zYsw8xIBFtbmFhBQ8imHr0SQalTiQU2Qn46LORK0worsf4sZV5ZF3VBRdnCUwwbm
17 | 0XaMb3kE2UBlU8qPqLgxXPNjcEKuqtVlp76dT/lrXIhYUq+Famrf20Lm01kC5itz
18 | QF247hnUfo2uzxpatuEr2ggs2NjuODn57tVw95UCgYEAv0s+C5AxC9OSzWFLEAcW
19 | dwYi8toedBC4z/b9/nRkHJf4JkRMhW6ZuzaCFs2Ax+wZuIi1bqSSgYi0OHx3BhZe
20 | wTWYTb5p/owzONCjJisRKByG14SETuqTdgmIyggs9YSG+Yr9mYM6fdr2EhI+EuYS
21 | 4QGsuOYg5GS4wqC3OglJT6ECgYA8y28QRPQsIXnO259OjnzINDkLKGyX6P5xl8yH
22 | QFidfod/FfQk6NaPxSBV67xSA4X5XBVVbfKji5FB8MC6kAoBIHn63ybSY+4dJSuB
23 | 70eV8KihxuSFbawwMuRsYoGzkAnKGrRKIiJTs67Ju14NatO0QiJnm5haYxtb4MqK
24 | md1kTQKBgDmTxtSBVOV8eMhl076OoOvdnpb3sy/obI/XUvurS0CaAcqmkVSNJ6c+
25 | g1O041ocTbuW5d3fbzo9Jyle6qsvUQd7fuoUfAMrd0inKsuYPPM0IZOExbt8QqLI
26 | KFJ+r/nQYoJkmiNO8PssxcP3CMFB6TpUx0BgFcrhH//TtKKNrGTl
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/teuthology/openstack/archive-key.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8vP6xqjfYkv/IyDBMUNOdBVkrePJeWFAmkcuyDCAdYdD/bst/ARsYVViwk0Eae1nAJm8f8rhzQh69udf9jr2qCYBz1mllAshwgXQDyGbBVhFT4ClMAraoC9p1OFZ+JEEDr5IahHLpVXkxZ6r6GqUZ/1rFEqqEQI2Ee4YG4F6ixQ5k44c1XO8Uhun/jzjbJitlQOcPNbslfaRgaLqRoQBh5HypgmBP/3k+bgGC7VXswjfqvHU7/kFI8Xcu8NKedpnIcINm3DMLnSUGdLafHyQo+aZ0oFIQmhN01E8hdn+UyrmnMhZA8OMjw7mltfUElzF8WVn+H5sjq7whfe99MYdb loic@fold
2 |
--------------------------------------------------------------------------------
/teuthology/openstack/bootstrap-teuthology.sh:
--------------------------------------------------------------------------------
#!/bin/bash -ex
# Clone (or initialize) a teuthology checkout and run its bootstrap script.
#
# Usage: bootstrap-teuthology.sh [path] [git-url] [branch]
#   $1: destination directory                     (default: "teuthology")
#   $2: git repository to fetch from              (default: ceph/teuthology)
#   $3: branch name, or "origin/pr/N/merge" to
#       check out a pull-request merge ref        (default: main)
TEUTH_PATH=${1:-"teuthology"}
TEUTH_GIT=${2:-"https://github.com/ceph/teuthology"}
TEUTH_BRANCH=${3:-"main"}

# git init is idempotent, so re-running against an existing checkout is safe.
mkdir -p $TEUTH_PATH
git init $TEUTH_PATH

pushd $TEUTH_PATH

echo Fetch upstream changes from $TEUTH_GIT
git fetch --tags --progress $TEUTH_GIT +refs/heads/*:refs/remotes/origin/*
git config remote.origin.url $TEUTH_GIT
git config --add remote.origin.fetch +refs/heads/*:refs/remotes/origin/*
# NOTE(review): this repeats the remote.origin.url setting from two lines
# above — looks redundant; confirm before removing.
git config remote.origin.url $TEUTH_GIT

# Check if branch has form origin/pr/*/merge
isPR="^origin\/pr\/"
if [[ "$TEUTH_BRANCH" =~ $isPR ]] ; then

    # NOTE(review): PR refs are fetched from the hard-coded suse/teuthology
    # fork, not from $TEUTH_GIT — verify this is intentional.
    git fetch --tags --progress https://github.com/suse/teuthology +refs/pull/*:refs/remotes/origin/pr/*
    rev=$(git rev-parse refs/remotes/$TEUTH_BRANCH^{commit})

    # Detached-HEAD checkout of the PR merge commit.
    git config core.sparsecheckout
    git checkout -f $rev
else
    git checkout $TEUTH_BRANCH
fi

# Install teuthology's dependencies into a virtualenv.
./bootstrap install

popd

34 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-basic.yaml:
--------------------------------------------------------------------------------
1 | overrides:
2 | ceph:
3 | conf:
4 | global:
5 | osd heartbeat grace: 100
6 |       # the two settings below address issue #1017
7 | mon lease: 15
8 | mon lease ack timeout: 25
9 | s3tests:
10 | idle_timeout: 1200
11 | ceph-fuse:
12 | client.0:
13 | mount_wait: 60
14 | mount_timeout: 120
15 | archive-on-error: true
16 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-buildpackages.yaml:
--------------------------------------------------------------------------------
1 | tasks:
2 | - buildpackages:
3 | good_machine:
4 | disk: 100 # GB
5 | ram: 15000 # MB
6 | cpus: 16
7 | min_machine:
8 | disk: 100 # GB
9 | ram: 8000 # MB
10 | cpus: 1
11 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-6.5-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - yum install -y yum-utils && yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/6/x86_64/ && yum install --nogpgcheck -y epel-release && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 && rm /etc/yum.repos.d/dl.fedoraproject.org*
9 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - dracut-modules-growroot
21 | runcmd:
22 | - mkinitrd --force /boot/initramfs-2.6.32-573.3.1.el6.x86_64.img 2.6.32-573.3.1.el6.x86_64
23 | - reboot
24 | final_message: "{up}, after $UPTIME seconds"
25 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - redhat-lsb-core
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - redhat-lsb-core
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -ie 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network-scripts/ifcfg-*
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | - redhat-lsb-core
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-centos-7.3-user-data.txt:
--------------------------------------------------------------------------------
1 | openstack-centos-7.2-user-data.txt
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-debian-7.0-user-data.txt:
--------------------------------------------------------------------------------
1 | openstack-ubuntu-14.04-user-data.txt
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-debian-8.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | preserve_hostname: true
12 | system_info:
13 | default_user:
14 | name: {username}
15 | packages:
16 | - python
17 | - wget
18 | - git
19 | - ntp
20 | runcmd:
21 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
22 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
23 | - echo '{username} ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
24 | final_message: "{up}, after $UPTIME seconds"
25 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - zypper --non-interactive --gpg-auto-import-keys refresh
20 | - zypper --non-interactive remove --force librados2 librbd1 multipath-tools-rbd qemu-block-rbd ntp
21 | - zypper --non-interactive install --no-recommends --force wget git-core rsyslog lsb-release make gcc gcc-c++ salt-master salt-minion salt-api chrony
22 | - systemctl enable chronyd.service
23 | - systemctl start chronyd.service
24 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
25 | - sleep 30
26 | final_message: "{up}, after $UPTIME seconds"
27 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-15.1-user-data.txt:
--------------------------------------------------------------------------------
1 | openstack-opensuse-15.0-user-data.txt
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-42.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - zypper --non-interactive --no-gpg-checks refresh
20 | - zypper --non-interactive remove systemd-logger
21 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
22 | lsb-release salt-minion salt-master make
23 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
24 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
25 | - systemctl enable salt-minion.service ntpd.service
26 | - systemctl restart ntpd.service
27 | final_message: "{up}, after $UPTIME seconds"
28 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-42.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - 'zypper rr openSUSE-Leap-Cloud-Tools || :'
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive remove systemd-logger
22 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
23 | lsb-release salt-minion salt-master make gcc gcc-c++
24 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
25 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
26 | - systemctl enable salt-minion.service ntpd.service
27 | - systemctl restart ntpd.service
28 | final_message: "{up}, after $UPTIME seconds"
29 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-opensuse-42.3-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - 'zypper rr openSUSE-Leap-Cloud-Tools || :'
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive remove systemd-logger
22 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog lsb-release make gcc gcc-c++
23 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
24 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
25 | - systemctl enable ntpd.service
26 | - systemctl restart ntpd.service
27 | final_message: "{up}, after $UPTIME seconds"
28 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-12.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | - SuSEfirewall2 stop
12 | preserve_hostname: true
13 | users:
14 | - name: {username}
15 | gecos: User
16 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
17 | groups: users
18 | runcmd:
19 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
22 | lsb-release make
23 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
24 | - systemctl restart ntpd.service
25 | final_message: "{up}, after $UPTIME seconds"
26 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-12.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | - SuSEfirewall2 stop
12 | preserve_hostname: true
13 | users:
14 | - name: {username}
15 | gecos: User
16 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
17 | groups: users
18 | runcmd:
19 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
20 | - zypper --non-interactive --no-gpg-checks refresh
21 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog
22 | lsb-release salt-minion salt-master make gcc gcc-c++
23 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
24 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
25 | - systemctl enable salt-minion.service ntpd.service
26 | - systemctl restart ntpd.service
27 | final_message: "{up}, after $UPTIME seconds"
28 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-12.3-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | - SuSEfirewall2 stop
12 | preserve_hostname: true
13 | users:
14 | - name: {username}
15 | gecos: User
16 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
17 | groups: users
18 | runcmd:
19 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
20 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
21 | - ( if ! grep '^server' /etc/ntp.conf ; then for i in 0 1 2 3 ; do echo "server $i.opensuse.pool.ntp.org iburst" >> /etc/ntp.conf ; done ; fi )
22 | - systemctl enable salt-minion.service ntpd.service
23 | - systemctl restart ntpd.service
24 | final_message: "{up}, after $UPTIME seconds"
25 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-eth0
6 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
7 | - hostname $(cat /etc/hostname)
8 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
9 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
10 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
11 | preserve_hostname: true
12 | users:
13 | - name: {username}
14 | gecos: User
15 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
16 | groups: users
17 | runcmd:
18 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
19 | - zypper --non-interactive --no-gpg-checks refresh
20 | - zypper --non-interactive install --no-recommends wget rsyslog lsb-release make gcc gcc-c++ chrony
21 | - sed -i -e 's/^! pool/pool/' /etc/chrony.conf
22 | - systemctl enable chronyd.service
23 | - systemctl start chronyd.service
24 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
25 | final_message: "{up}, after $UPTIME seconds"
26 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-sle-15.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver {nameserver} | tee /etc/resolv.conf
4 | - echo search {lab_domain} | tee -a /etc/resolv.conf
5 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
6 | # See https://github.com/ceph/ceph-cm-ansible/blob/main/roles/cobbler/templates/snippets/cephlab_user
7 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo ; chmod 0440 /etc/sudoers.d/cephlab_sudo
8 | preserve_hostname: true
9 | users:
10 | - name: {username}
11 | gecos: User
12 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
13 | groups: users
14 | runcmd:
15 | - |
16 | for i in $(seq 1 30) ; do
17 | ping -q -c 1 8.8.8.8 && break
18 | sleep 10
19 | done
20 | ETH=$(ip route list | grep "scope link" | cut -f 3 -d ' ')
21 | sed -i -e 's/PEERDNS="yes"/PEERDNS="no"/' /etc/sysconfig/network/ifcfg-$ETH
22 | (
23 | curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname |
24 | sed -e 's/[\.-].*//'
25 | eval printf "%03d%03d%03d%03d.{lab_domain}" $(
26 | curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 |
27 | tr . ' ' )
28 | ) | tee /etc/hostname
29 | hostname $(cat /etc/hostname)
30 | - ( MYHOME=/home/{username} ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R {username}.users $MYHOME/.ssh )
31 | - zypper --non-interactive --no-gpg-checks refresh
32 | - zypper --non-interactive install --no-recommends wget rsyslog lsb-release make gcc gcc-c++ chrony
33 | - sed -i -e 's/^! pool/pool/' /etc/chrony.conf
34 | - systemctl enable chronyd.service
35 | - systemctl start chronyd.service
36 | - sed -i -e "s/^#master:.*$/master:\ $(curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//')$(eval printf "%03d%03d%03d%03d.{lab_domain}" $(echo "{nameserver}" | tr . ' '))/" /etc/salt/minion
37 | final_message: "{up}, after $UPTIME seconds"
38 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-teuthology.cron:
--------------------------------------------------------------------------------
1 | SHELL=/bin/bash
2 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-ubuntu-12.04-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( curl --silent http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | preserve_hostname: true
12 | manage_etc_hosts: true
13 | system_info:
14 | default_user:
15 | name: {username}
16 | packages:
17 | - python
18 | - wget
19 | - git
20 | - ntp
21 | runcmd:
22 | - dpkg -l python wget git ntp >> /var/log/cloud-init-output.log
23 | - echo "{up}" >> /var/log/cloud-init-output.log
24 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-ubuntu-14.04-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( wget -qO - http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(wget -qO - http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | manage_etc_hosts: true
12 | preserve_hostname: true
13 | system_info:
14 | default_user:
15 | name: {username}
16 | packages:
17 | - python
18 | - wget
19 | - git
20 | - ntp
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-ubuntu-16.04-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - apt-get remove --purge -y resolvconf || true
4 | - echo 'prepend domain-name-servers {nameserver};' | tee -a /etc/dhcp/dhclient.conf
5 | - echo 'supersede domain-name "{lab_domain}";' | tee -a /etc/dhcp/dhclient.conf
6 | - ifdown -a ; ifup -a
7 | - grep --quiet {nameserver} /etc/resolv.conf || ( echo 'nameserver {nameserver}' ; echo 'search {lab_domain}' ) | tee /etc/resolv.conf
8 | - ( wget -qO - http://169.254.169.254/2009-04-04/meta-data/hostname | sed -e 's/[\.-].*//' ; eval printf "%03d%03d%03d%03d.{lab_domain}" $(wget -qO - http://169.254.169.254/2009-04-04/meta-data/local-ipv4 | tr . ' ' ) ) | tee /etc/hostname
9 | - hostname $(cat /etc/hostname)
10 | - echo "MaxSessions 1000" >> /etc/ssh/sshd_config
11 | manage_etc_hosts: true
12 | preserve_hostname: true
13 | system_info:
14 | default_user:
15 | name: {username}
16 | packages:
17 | - python
18 | - wget
19 | - git
20 | - ntp
21 | final_message: "{up}, after $UPTIME seconds"
22 |
--------------------------------------------------------------------------------
/teuthology/openstack/openstack-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - touch /tmp/init.out
4 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
5 | manage_etc_hosts: true
6 | system_info:
7 | default_user:
8 | name: TEUTHOLOGY_USERNAME
9 | packages:
10 | - python-virtualenv
11 | - git
12 | - rsync
13 | runcmd:
14 | - su - -c '(set -x ; CLONE_OPENSTACK && cd teuthology && ./bootstrap install)' TEUTHOLOGY_USERNAME >> /tmp/init.out 2>&1
15 | - echo 'export OPENRC' | tee /home/TEUTHOLOGY_USERNAME/openrc.sh
16 | - su - -c '(set -x ; source openrc.sh ; cd teuthology ; source virtualenv/bin/activate ; teuthology/openstack/setup-openstack.sh --nworkers NWORKERS UPLOAD CEPH_WORKBENCH CANONICAL_TAGS SETUP_OPTIONS)' TEUTHOLOGY_USERNAME >> /tmp/init.out 2>&1
17 | # wa: we want to stop paddles and pulpito started by setup-openstack, before start teuthology service
18 | - pkill -f 'pecan serve'
19 | - pkill -f 'python run.py'
20 | - systemctl enable teuthology
21 | - systemctl start teuthology
22 | final_message: "teuthology is up and running after $UPTIME seconds"
23 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/openstack/test/__init__.py
--------------------------------------------------------------------------------
/teuthology/openstack/test/archive-on-error.yaml:
--------------------------------------------------------------------------------
1 | archive-on-error: true
2 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/noop.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | machine_type: openstack
3 | os_type: ubuntu
4 | os_version: "14.04"
5 | roles:
6 | - - mon.a
7 | - osd.0
8 | tasks:
9 | - exec:
10 | mon.a:
11 | - echo "Well done !"
12 |
13 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/resources_hint.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | machine_type: openstack
3 | openstack:
4 | - machine:
5 | disk: 10 # GB
6 | ram: 10000 # MB
7 | cpus: 1
8 | volumes:
9 | count: 1
10 | size: 2 # GB
11 | os_type: ubuntu
12 | os_version: "14.04"
13 | roles:
14 | - - mon.a
15 | - osd.0
16 | tasks:
17 | - exec:
18 | mon.a:
19 | - test $(sed -n -e 's/MemTotal.* \([0-9][0-9]*\).*/\1/p' < /proc/meminfo) -ge 10000000 && echo "RAM" "size" "ok"
20 | - cat /proc/meminfo
21 | # wait for the attached volume to show up
22 | - for delay in 1 2 4 8 16 32 64 128 256 512 ; do if test -e /sys/block/vdb/size ; then break ; else sleep $delay ; fi ; done
23 | # 4000000 because 512 bytes sectors
24 | - test $(cat /sys/block/vdb/size) -gt 4000000 && echo "Disk" "size" "ok"
25 | - cat /sys/block/vdb/size
26 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/resources_hint_no_cinder.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | machine_type: openstack
3 | openstack:
4 | - machine:
5 | disk: 10 # GB
6 | ram: 10000 # MB
7 | cpus: 1
8 | volumes:
9 | count: 0
10 | size: 2 # GB
11 | os_type: ubuntu
12 | os_version: "14.04"
13 | roles:
14 | - - mon.a
15 | - osd.0
16 | tasks:
17 | - exec:
18 | mon.a:
19 | - cat /proc/meminfo
20 | - test $(sed -n -e 's/MemTotal.* \([0-9][0-9]*\).*/\1/p' < /proc/meminfo) -ge 10000000 && echo "RAM" "size" "ok"
21 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/stop_worker.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/suites/noop/+:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/openstack/test/suites/noop/+
--------------------------------------------------------------------------------
/teuthology/openstack/test/suites/noop/noop.yaml:
--------------------------------------------------------------------------------
1 | stop_worker: true
2 | roles:
3 | - - mon.a
4 | - osd.0
5 | tasks:
6 | - exec:
7 | mon.a:
8 | - echo "Well done !"
9 |
10 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/suites/nuke/+:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/openstack/test/suites/nuke/+
--------------------------------------------------------------------------------
/teuthology/openstack/test/test_config.py:
--------------------------------------------------------------------------------
1 | from teuthology.config import config
2 |
3 |
class TestOpenStack(object):
    """Sanity checks on the 'openstack' section of the teuthology config."""

    def setup_method(self):
        # Assumes the surrounding environment loaded a config containing
        # an 'openstack' section.
        self.openstack_config = config['openstack']

    def test_config_clone(self):
        # The config must say where to clone teuthology from.
        assert 'clone' in self.openstack_config

    def test_config_user_data(self):
        # 'user-data' is a path template with {os_type}/{os_version} slots.
        os_type = 'rhel'
        os_version = '7.0'
        template_path = self.openstack_config['user-data'].format(
            os_type=os_type,
            os_version=os_version)
        assert os_type in template_path
        assert os_version in template_path

    def test_config_ip(self):
        assert 'ip' in self.openstack_config

    def test_config_machine(self):
        # Default machine resource hints must all be present.
        assert 'machine' in self.openstack_config
        machine_config = self.openstack_config['machine']
        assert 'disk' in machine_config
        assert 'ram' in machine_config
        assert 'cpus' in machine_config

    def test_config_volumes(self):
        # Default attached-volume hints must all be present.
        assert 'volumes' in self.openstack_config
        volumes_config = self.openstack_config['volumes']
        assert 'count' in volumes_config
        assert 'size' in volumes_config
36 |
--------------------------------------------------------------------------------
/teuthology/openstack/test/user-data-test1.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | system_info:
3 | default_user:
4 | name: ubuntu
5 | final_message: "teuthology is up and running after $UPTIME seconds, substituted variables nworkers=NWORKERS openrc=OPENRC username=TEUTHOLOGY_USERNAME upload=UPLOAD ceph_workbench=CEPH_WORKBENCH clone=CLONE_OPENSTACK"
6 |
--------------------------------------------------------------------------------
/teuthology/orchestra/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/orchestra/__init__.py
--------------------------------------------------------------------------------
/teuthology/orchestra/daemon/__init__.py:
--------------------------------------------------------------------------------
1 | from teuthology.orchestra.daemon.group import DaemonGroup # noqa
2 |
--------------------------------------------------------------------------------
/teuthology/orchestra/monkey.py:
--------------------------------------------------------------------------------
1 | """
2 | Monkey patches (paramiko support)
3 | """
4 | import logging
5 |
6 | log = logging.getLogger(__name__)
7 |
def patch_001_paramiko_deprecation():
    """
    Silence an unhelpful DeprecationWarning that Paramiko triggers.

    Not strictly a monkeypatch.
    """
    import warnings
    warnings.filterwarnings(
        action='ignore',
        message='This application uses RandomPool,',
        category=DeprecationWarning,
    )
20 |
21 |
def patch_100_paramiko_log():
    """
    Quiet down paramiko's chatty transport logger.

    Not strictly a monkeypatch.
    """
    transport_logger = logging.getLogger('paramiko.transport')
    transport_logger.setLevel(logging.WARNING)
29 |
30 |
def patch_100_logger_getChild():
    """
    Backport of the Python 2.7 feature Logger.getChild.

    On interpreters whose ``logging.Logger`` lacks ``getChild``
    (Python < 2.7) attach an equivalent implementation; a no-op on
    modern interpreters.
    """
    # ``logging`` is already imported at module scope; the previous
    # function-local re-import was redundant and has been removed.
    if not hasattr(logging.Logger, 'getChild'):
        def getChild(self, name):
            # Mirror stdlib semantics: child named "<parent>.<name>".
            return logging.getLogger('.'.join([self.name, name]))
        logging.Logger.getChild = getChild
40 |
41 |
def patch_100_trigger_rekey():
    # Fixes http://tracker.ceph.com/issues/15236
    # Replace paramiko's packet-level rekey trigger with a no-op lambda;
    # presumably returning True satisfies the caller without actually
    # rekeying — see the tracker issue above for background.
    from paramiko.packet import Packetizer
    Packetizer._trigger_rekey = lambda self: True
46 |
47 |
def patch_all():
    """
    Apply every patch_* function defined in this module, in name order.
    """
    patches = {
        name: func
        for name, func in globals().items()
        if name.startswith('patch_') and name != 'patch_all'
    }
    for name in sorted(patches):
        log.debug('Patching %s', name)
        patches[name]()
57 |
--------------------------------------------------------------------------------
/teuthology/orchestra/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/orchestra/test/__init__.py
--------------------------------------------------------------------------------
/teuthology/orchestra/test/files/daemon-systemdstate-pid-ps-ef.output:
--------------------------------------------------------------------------------
1 | ceph 658 1 0 Jun08 ? 00:07:43 /usr/bin/ceph-mgr -f --cluster ceph --id host1 --setuser ceph --setgroup ceph
2 | ceph 1634 1 0 Jun08 ? 00:02:17 /usr/bin/ceph-mds -f --cluster ceph --id host1 --setuser ceph --setgroup ceph
3 | ceph 31555 1 0 Jun08 ? 01:13:50 /usr/bin/ceph-mon -f --cluster ceph --id host1 --setuser ceph --setgroup ceph
4 | ceph 31765 1 0 Jun08 ? 00:48:42 /usr/bin/radosgw -f --cluster ceph --name client.rgw.host1.rgw0 --setuser ceph --setgroup ceph
5 | ceph 97427 1 0 Jun17 ? 00:41:39 /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph
--------------------------------------------------------------------------------
/teuthology/orchestra/test/integration/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/teuthology/eaeb97003cfc43fc86754e4e45e7b398c784dedf/teuthology/orchestra/test/integration/__init__.py
--------------------------------------------------------------------------------
/teuthology/orchestra/test/test_systemd.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | from logging import debug
5 | from teuthology import misc
6 | from teuthology.orchestra import cluster
7 | from teuthology.orchestra.run import quote
8 | from teuthology.orchestra.daemon.group import DaemonGroup
9 | import subprocess
10 |
11 |
class FakeRemote(object):
    # Minimal stand-in for an orchestra Remote; the attributes it needs
    # (sh, init_system, shortname) are attached dynamically at use time.
    pass
14 |
15 |
def test_pid():
    """
    Verify DaemonGroup resolves daemon PIDs from `ps -ef`-style output.

    The remote's command execution is faked: any command is rewritten to
    `cat` a canned ps output file, and every registered daemon must then
    report a truthy pid.
    """
    ctx = argparse.Namespace()
    ctx.daemons = DaemonGroup(use_systemd=True)
    remote = FakeRemote()

    # Canned `ps -ef` output shipped alongside this test.
    ps_ef_output_path = os.path.join(
        os.path.dirname(__file__),
        "files/daemon-systemdstate-pid-ps-ef.output"
    )

    # patching ps -ef command output using a file
    def sh(args):
        # Replace the first two command tokens with `cat <file>` so any
        # "ps -ef"-like invocation yields the canned output instead.
        args[0:2] = ["cat", ps_ef_output_path]
        debug(args)
        return subprocess.getoutput(quote(args))

    remote.sh = sh
    remote.init_system = 'systemd'
    remote.shortname = 'host1'

    ctx.cluster = cluster.Cluster(
        remotes=[
            (remote, ['rgw.0', 'mon.a', 'mgr.a', 'mds.a', 'osd.0'])
        ],
    )

    for remote, roles in ctx.cluster.remotes.items():
        for role in roles:
            _, rol, id_ = misc.split_role(role)
            # mon/mgr/mds are registered under the host's shortname,
            # matching the --id values in the canned ps output; other
            # daemons are registered under their role id.
            if any(rol.startswith(x) for x in ['mon', 'mgr', 'mds']):
                ctx.daemons.register_daemon(remote, rol, remote.shortname)
            else:
                ctx.daemons.register_daemon(remote, rol, id_)

    for _, daemons in ctx.daemons.daemons.items():
        for daemon in daemons.values():
            pid = daemon.pid
            debug(pid)
            assert pid
55 |
--------------------------------------------------------------------------------
/teuthology/orchestra/test/util.py:
--------------------------------------------------------------------------------
def assert_raises(excClass, callableObj, *args, **kwargs):
    """
    Like unittest.TestCase.assertRaises, but returns the exception.
    """
    try:
        callableObj(*args, **kwargs)
    except excClass as caught:
        return caught
    # No exception was raised: fail with a readable class name.
    excName = getattr(excClass, '__name__', str(excClass))
    raise AssertionError("%s not raised" % excName)
13 |
--------------------------------------------------------------------------------
/teuthology/provision/cloud/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from teuthology.config import config
4 |
5 | from teuthology.provision.cloud import openstack
6 |
7 | log = logging.getLogger(__name__)
8 |
9 |
# Registry mapping a driver name to the provider/provisioner classes that
# implement it; tests extend this at runtime (e.g. with a 'dummy' driver).
supported_drivers = dict(
    openstack=dict(
        provider=openstack.OpenStackProvider,
        provisioner=openstack.OpenStackProvisioner,
    ),
)
16 |
17 |
def get_types():
    """
    Return the names of all configured libcloud providers.

    Yields an empty list when no libcloud providers are configured.
    """
    if 'libcloud' not in config or 'providers' not in config.libcloud:
        return []
    return list(config.libcloud['providers'])
23 |
24 |
def get_provider_conf(node_type):
    """
    Look up the libcloud provider configuration for a node type.
    """
    return config.libcloud['providers'][node_type]
29 |
30 |
def get_provider(node_type):
    """
    Instantiate the Provider class registered for node_type's driver.
    """
    conf = get_provider_conf(node_type)
    provider_cls = supported_drivers[conf['driver']]['provider']
    return provider_cls(name=node_type, conf=conf)
36 |
37 |
def get_provisioner(node_type, name, os_type, os_version, conf=None):
    """
    Instantiate the Provisioner registered for node_type's driver.

    The provisioner is constructed with its matching Provider instance
    plus the node name, OS type/version and optional extra conf.
    """
    provider = get_provider(node_type)
    driver_name = get_provider_conf(node_type)['driver']
    provisioner_cls = supported_drivers[driver_name]['provisioner']
    return provisioner_cls(
        provider=provider,
        name=name,
        os_type=os_type,
        os_version=os_version,
        conf=conf,
    )
50 |
--------------------------------------------------------------------------------
/teuthology/provision/cloud/test/test_cloud_init.py:
--------------------------------------------------------------------------------
1 | from teuthology.config import config
2 | from teuthology.provision import cloud
3 |
# Minimal libcloud configuration that exercises the provider plumbing
# using libcloud's built-in 'dummy' driver.
dummy_config = dict(
    providers=dict(
        my_provider=dict(
            driver='dummy',
            driver_args=dict(
                creds=0,
            ),
            conf_1='1',
            conf_2='2',
        )
    )
)


class DummyProvider(cloud.base.Provider):
    # For libcloud's dummy driver
    # The dummy driver takes its credentials positionally.
    _driver_posargs = ['creds']

# Registry entry pairing the dummy provider with the base Provisioner.
dummy_drivers = dict(
    provider=DummyProvider,
    provisioner=cloud.base.Provisioner,
)


class TestInit(object):
    """Tests for the driver registry helpers in teuthology.provision.cloud."""

    def setup_method(self):
        config.load()
        config.libcloud = dummy_config
        # Register the dummy driver so get_provider() can resolve it.
        cloud.supported_drivers['dummy'] = dummy_drivers

    def teardown_method(self):
        # Unregister to avoid leaking state into other tests.
        del cloud.supported_drivers['dummy']

    def test_get_types(self):
        assert list(cloud.get_types()) == ['my_provider']

    def test_get_provider_conf(self):
        expected = dummy_config['providers']['my_provider']
        assert cloud.get_provider_conf('my_provider') == expected

    def test_get_provider(self):
        obj = cloud.get_provider('my_provider')
        assert obj.name == 'my_provider'
        assert obj.driver_name == 'dummy'

    def test_get_provisioner(self):
        obj = cloud.get_provisioner(
            'my_provider',
            'node_name',
            'ubuntu',
            '16.04',
            dict(foo='bar'),
        )
        assert obj.provider.name == 'my_provider'
        assert obj.name == 'node_name'
        assert obj.os_type == 'ubuntu'
        assert obj.os_version == '16.04'
61 |
--------------------------------------------------------------------------------
/teuthology/provision/cloud/test/test_openstack_userdata_conf.yaml:
--------------------------------------------------------------------------------
1 | libcloud:
2 | providers:
3 | my_provider:
4 | allow_networks:
5 | - sesci
6 | userdata:
7 | 'ubuntu-16.04':
8 | bootcmd:
9 | - 'SuSEfirewall2 stop || true'
10 | - 'service firewalld stop || true'
11 | runcmd:
12 | - 'uptime'
13 | - 'date'
14 | - 'zypper in -y lsb-release make gcc gcc-c++ chrony || true'
15 | - 'systemctl enable chronyd.service || true'
16 | - 'systemctl start chronyd.service || true'
17 | ssh_authorized_keys:
18 | - user_public_key1
19 | - user_public_key2
20 | driver: openstack
21 | driver_args:
22 | username: user
23 | password: password
24 | ex_force_auth_url: 'http://127.0.0.1:9999/v2.0/tokens'
25 |
--------------------------------------------------------------------------------
/teuthology/provision/test/test_init_provision.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | from pytest import raises
3 | from teuthology.config import config
4 |
5 | import teuthology.provision
6 |
# Two provisioner backends sharing 'common_type', so reimaging a type
# served by more than one provisioner can be exercised.
test_config = dict(
    pelagos=dict(
        endpoint='http://pelagos.example:5000/',
        machine_types='ptype1,ptype2,common_type',
    ),
    fog=dict(
        endpoint='http://fog.example.com/fog',
        api_token='API_TOKEN',
        user_token='USER_TOKEN',
        machine_types='ftype1,ftype2,common_type',
    )
)

class TestInitProvision(object):
    """Tests for teuthology.provision's top-level reimage helpers."""

    def setup_method(self):
        # Fresh config per test; deepcopy guards the template above.
        config.load(deepcopy(test_config))

    def test_get_reimage_types(self):
        reimage_types = teuthology.provision.get_reimage_types()
        # 'common_type' appears once per backend.
        assert reimage_types == ["ptype1", "ptype2", "common_type",
                "ftype1", "ftype2", "common_type"]

    def test_reimage(self):
        class context:
            pass
        ctx = context()
        ctx.os_type = 'sle'
        ctx.os_version = '15.1'
        with raises(Exception) as e_info:
            teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'not-defined-type')
        e_str = str(e_info)
        print("Caught exception: " + e_str)
        # NOTE(review): r"configured\sprovisioners" is a literal string fed
        # to str.find(), not a regex engine — this assertion can never
        # fail; confirm whether re.search was intended.
        assert e_str.find(r"configured\sprovisioners") == -1

        with raises(Exception) as e_info:
            teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'common_type')
        e_str = str(e_info)
        print("Caught exception: " + e_str)
        # NOTE(review): same literal-vs-regex concern as above.
        assert e_str.find(r"used\swith\sone\sprovisioner\sonly") == -1
47 |
--------------------------------------------------------------------------------
/teuthology/provision/test/test_pelagos.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | from pytest import raises
3 | from teuthology.config import config
4 | from teuthology.provision import pelagos
5 |
6 | import teuthology.provision
7 |
8 |
# Minimal pelagos provisioner configuration for these tests.
test_config = dict(
    pelagos=dict(
        endpoint='http://pelagos.example:5000/',
        machine_types='ptype1,ptype2',
    ),
)

class TestPelagos(object):
    """Tests for the pelagos provisioning helper."""

    def setup_method(self):
        # Fresh config per test; deepcopy guards the template above.
        config.load(deepcopy(test_config))

    def teardown_method(self):
        pass

    def test_get_types(self):
        #klass = pelagos.Pelagos
        types = pelagos.get_types()
        assert types == ["ptype1", "ptype2"]

    def test_disabled(self):
        # Clearing the endpoint must disable the provisioner.
        config.pelagos['endpoint'] = None
        enabled = pelagos.enabled()
        assert enabled == False

    def test_pelagos(self):
        class context:
            pass

        ctx = context()
        ctx.os_type ='sle'
        ctx.os_version = '15.1'
        with raises(Exception) as e_info:
            teuthology.provision.reimage(ctx, 'f.q.d.n.org', 'ptype1')
        e_str = str(e_info)
        print("Caught exception: " + e_str)
        # NOTE(review): the pattern below contains literal backslash-s text
        # and is passed to str.find(), not a regex engine, so this
        # assertion can never fail; confirm whether re.search was intended.
        assert e_str.find(r"Name\sor\sservice\snot\sknown") == -1
46 |
47 |
--------------------------------------------------------------------------------
/teuthology/reimage.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 |
4 | import teuthology
5 |
6 | from teuthology.parallel import parallel
7 | from teuthology.provision import reimage, get_reimage_types
8 | from teuthology.lock import query, ops
9 | from teuthology.misc import get_user
10 | from teuthology.misc import decanonicalize_hostname as shortname
11 |
12 | log = logging.getLogger(__name__)
13 |
def main(args):
    """
    Reimage a set of locked nodes in parallel.

    ``args`` is a docopt-style dictionary; the keys read here are
    '--verbose', '--os-type', '--os-version', '--owner' and the node
    list. Exits with status 1 if any node is unlocked, owned by someone
    else, or of a machine type no provisioner can reimage.
    """
    if (args['--verbose']):
        teuthology.log.setLevel(logging.DEBUG)

    ctx = argparse.Namespace()
    ctx.os_type = args['--os-type']
    ctx.os_version = args['--os-version']

    # NOTE(review): an empty-string key looks wrong; presumably this should
    # be the docopt positional (e.g. args['<nodes>']) — confirm against the
    # teuthology-reimage usage spec.
    nodes = args['']

    reimage_types = get_reimage_types()
    statuses = query.get_statuses(nodes)
    owner = args['--owner'] or get_user()
    # Refuse to touch nodes that are not locked at all.
    unlocked = [shortname(_['name'])
                for _ in statuses if not _['locked']]
    if unlocked:
        log.error(
            "Some of the nodes are not locked: %s", unlocked)
        exit(1)

    # Refuse to touch nodes locked by a different owner.
    improper = [shortname(_['name']) for _ in statuses if _['locked_by'] != owner]
    if improper:
        log.error(
            "Some of the nodes are not owned by '%s': %s", owner, improper)
        exit(1)

    # Refuse machine types no configured provisioner can reimage.
    irreimageable = [shortname(_['name']) for _ in statuses
                     if _['machine_type'] not in reimage_types]
    if irreimageable:
        # Fixed grammar of the user-facing message ("theirs" -> "their").
        log.error(
            "Following nodes cannot be reimaged because their machine type "
            "is not reimageable: %s", irreimageable)
        exit(1)

    def reimage_node(ctx, machine_name, machine_type):
        # Flag the node as being reimaged, reimage, then clear the flag.
        ops.update_nodes([machine_name], True)
        reimage(ctx, machine_name, machine_type)
        ops.update_nodes([machine_name])
        log.debug("Node '%s' reimaging is complete", machine_name)

    with parallel() as p:
        for node in statuses:
            log.debug("Start node '%s' reimaging", node['name'])
            p.spawn(reimage_node, ctx, shortname(node['name']), node['machine_type'])
58 |
--------------------------------------------------------------------------------
/teuthology/safepath.py:
--------------------------------------------------------------------------------
1 | import errno
2 | import os
3 |
def munge(path):
    """
    Munge a potentially hostile path name to be safe to use.

    This very definitely changes the meaning of the path,
    but it only does that for unsafe paths.
    """
    # explicitly ignoring windows as a platform
    safe = []
    for segment in path.split('/'):
        # drop empty ("foo//bar") and no-op ("foo/./bar") segments
        if segment in ('', '.'):
            continue
        # a leading dot becomes an underscore; neutralizes ".." as well
        if segment.startswith('.'):
            segment = '_' + segment[1:]
        safe.append(segment)
    # the empty string, "/", "//", etc all reduce to "_"
    return '/'.join(safe) if safe else '_'
25 |
26 |
def makedirs(root, path):
    """
    os.makedirs gets confused if the path contains '..', and root might.

    This relies on the fact that `path` has been normalized by munge().
    """
    current = root
    for segment in path.split('/'):
        current = os.path.join(current, segment)
        try:
            os.mkdir(current)
        except OSError as e:
            # already-existing entries are fine; re-raise anything else
            if e.errno != errno.EEXIST:
                raise
43 |
--------------------------------------------------------------------------------
/teuthology/suite/test/conftest.py:
--------------------------------------------------------------------------------
1 | from teuthology.config import config
2 |
def pytest_runtest_setup():
    # Reset teuthology's global config to defaults before every test.
    config.load({})
5 |
--------------------------------------------------------------------------------
/teuthology/suite/test/suites/noop/noop.yaml:
--------------------------------------------------------------------------------
1 | roles:
2 | - - mon.a
3 | - osd.0
4 | tasks:
5 | - exec:
6 | mon.a:
7 | - echo "Well done !"
8 |
--------------------------------------------------------------------------------
/teuthology/suite/test/test_placeholder.py:
--------------------------------------------------------------------------------
1 | from teuthology.suite.placeholder import (
2 | substitute_placeholders, dict_templ, Placeholder
3 | )
4 |
5 |
class TestPlaceholder(object):
    """Tests for teuthology.suite.placeholder substitution."""

    def test_substitute_placeholders(self):
        """Placeholders are replaced; the template itself stays untouched."""
        suite_hash = 'suite_hash'
        input_dict = dict(
            suite='suite',
            suite_branch='suite_branch',
            suite_hash=suite_hash,
            ceph_branch='ceph_branch',
            ceph_hash='ceph_hash',
            teuthology_branch='teuthology_branch',
            teuthology_sha1='teuthology_sha1',
            machine_type='machine_type',
            distro='distro',
            distro_version='distro_version',
            archive_upload='archive_upload',
            archive_upload_key='archive_upload_key',
            suite_repo='https://example.com/ceph/suite.git',
            suite_relpath='',
            ceph_repo='https://example.com/ceph/ceph.git',
            flavor='default',
            expire='expire',
        )
        output_dict = substitute_placeholders(dict_templ, input_dict)
        assert output_dict['suite'] == 'suite'
        assert output_dict['suite_sha1'] == suite_hash
        # Substitution must not mutate the shared template.
        assert isinstance(dict_templ['suite'], Placeholder)
        assert isinstance(
            dict_templ['overrides']['admin_socket']['branch'],
            Placeholder)

    def test_null_placeholders_dropped(self):
        """Keys whose input value is None are dropped from the output."""
        input_dict = dict(
            suite='suite',
            suite_branch='suite_branch',
            suite_hash='suite_hash',
            ceph_branch='ceph_branch',
            ceph_hash='ceph_hash',
            teuthology_branch='teuthology_branch',
            teuthology_sha1='teuthology_sha1',
            machine_type='machine_type',
            archive_upload='archive_upload',
            archive_upload_key='archive_upload_key',
            distro=None,
            distro_version=None,
            suite_repo='https://example.com/ceph/suite.git',
            suite_relpath='',
            ceph_repo='https://example.com/ceph/ceph.git',
            flavor=None,
            expire='expire',
        )
        output_dict = substitute_placeholders(dict_templ, input_dict)
        # distro is None, so the derived os_type key must be absent.
        assert 'os_type' not in output_dict
58 |
--------------------------------------------------------------------------------
/teuthology/task/args.py:
--------------------------------------------------------------------------------
1 | """
2 | These routines only appear to be used by the peering_speed tests.
3 | """
4 | def gen_args(name, args):
5 | """
6 | Called from argify to generate arguments.
7 | """
8 | usage = [""]
9 | usage += [name + ':']
10 | usage += \
11 | [" {key}: <{usage}> ({default})".format(
12 | key=key, usage=_usage, default=default)
13 | for (key, _usage, default, _) in args]
14 | usage.append('')
15 | usage.append(name + ':')
16 | usage += \
17 | [" {key}: {default}".format(
18 | key = key, default = default)
19 | for (key, _, default, _) in args]
20 | usage = '\n'.join(' ' + i for i in usage)
21 | def ret(config):
22 | """
23 | return an object with attributes set from args.
24 | """
25 | class Object(object):
26 | """
27 | simple object
28 | """
29 | pass
30 | obj = Object()
31 | for (key, usage, default, conv) in args:
32 | if key in config:
33 | setattr(obj, key, conv(config[key]))
34 | else:
35 | setattr(obj, key, conv(default))
36 | return obj
37 | return usage, ret
38 |
def argify(name, args):
    """
    Decorator factory used by the peering speed tests.
    See peering_speed_test.py

    Wraps a task function so that its ``config`` kwarg is replaced by the
    object produced by :func:`gen_args`, and appends the generated usage
    text to the wrapped function's docstring.
    """
    (usage, config_func) = gen_args(name, args)

    def ret1(f):
        """
        Wrapper to handle doc and usage information
        """
        def ret2(**kwargs):
            """
            Call f (the parameter passed to ret1)
            """
            config = kwargs.get('config', {})
            if config is None:
                config = {}
            kwargs['config'] = config_func(config)
            return f(**kwargs)
        # Guard against functions with no docstring: f.__doc__ is None
        # there, and `None + str` would raise TypeError.
        ret2.__doc__ = (f.__doc__ or '') + usage
        return ret2
    return ret1
61 |
--------------------------------------------------------------------------------
/teuthology/task/background_exec.py:
--------------------------------------------------------------------------------
1 | """
2 | Background task
3 | """
4 |
5 | import contextlib
6 | import logging
7 |
8 | from teuthology import misc
9 | from teuthology.orchestra import run
10 |
11 | log = logging.getLogger(__name__)
12 |
13 |
@contextlib.contextmanager
def task(ctx, config):
    """
    Run a background task.

    Run the given command on a client, similar to exec. However, when
    we hit the finally because the subsequent task is ready to exit, kill
    the child process.

    We do not do any error code checking here since we are forcefully killing
    off the child when we are done.

    If the command is a list, we simply join it with ;'s.

    Example::

        tasks:
        - install:
        - background_exec:
            client.0: while true ; do date ; sleep 1 ; done
            client.1:
            - while true
            - do id
            - sleep 1
            - done
        - exec:
          client.0:
            - sleep 10

    :param ctx: Context
    :param config: dict mapping role -> command (a string, or a list of
                   strings joined with '; ')
    """
    assert isinstance(config, dict), "task background got invalid config"

    testdir = misc.get_testdir(ctx)

    tasks = {}
    for role, cmd in config.items():
        (remote,) = ctx.cluster.only(role).remotes.keys()
        log.info('Running background command on role %s host %s', role,
                 remote.name)
        if isinstance(cmd, list):
            cmd = '; '.join(cmd)
        # BUG FIX: str.replace returns a new string; the original call
        # discarded the result, so $TESTDIR was never actually expanded.
        cmd = cmd.replace('$TESTDIR', testdir)
        tasks[remote.name] = remote.run(
            args=[
                'sudo',
                'TESTDIR=%s' % testdir,
                'daemon-helper', 'kill', '--kill-group',
                'bash', '-c', cmd,
            ],
            wait=False,
            stdin=run.PIPE,
            check_status=False,
            logger=log.getChild(remote.name)
        )

    try:
        yield

    finally:
        # Closing stdin signals daemon-helper (invoked in 'kill' mode
        # above) to terminate the child process group.
        for name, task in tasks.items():
            log.info('Stopping background command on %s', name)
            task.stdin.close()
        run.wait(tasks.values())
77 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-6.5-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - yum install -y yum-utils && yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/6/x86_64/ && yum install --nogpgcheck -y epel-release && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 && rm /etc/yum.repos.d/dl.fedoraproject.org*
4 | - ( echo ; echo "MaxSessions 1000" ) >> /etc/ssh/sshd_config
5 | - ( echo 'Defaults !requiretty' ; echo 'Defaults visiblepw' ) | tee /etc/sudoers.d/cephlab_sudo
6 | preserve_hostname: true
7 | system_info:
8 | default_user:
9 | name: ubuntu
10 | packages:
11 | - dracut-modules-growroot
12 | runcmd:
13 | - mkinitrd --force /boot/initramfs-2.6.32-573.3.1.el6.x86_64.img 2.6.32-573.3.1.el6.x86_64
14 | - reboot
15 | final_message: "READYTORUN"
16 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.0-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.1-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.2-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/centos-7.3-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/debian-8.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo 'APT::Get::AllowUnauthenticated "true";' | tee /etc/apt/apt.conf.d/99disablesigs
4 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
5 | manage_etc_hosts: true
6 | preserve_hostname: true
7 | system_info:
8 | default_user:
9 | name: ubuntu
10 | runcmd:
11 | - echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
12 | final_message: "READYTORUN"
13 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/opensuse-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - zypper --non-interactive --no-gpg-checks rm gettext-runtime-mini grub2 grub2-branding-openSUSE grub2-i386-pc grub2-snapper-plugin grub2-systemd-sleep-plugin
14 | - zypper --non-interactive --no-gpg-checks install --no-recommends wget git-core rsyslog lsb-release make gcc gcc-c++ grub2 rpm-build
15 | - sleep 30
16 | final_message: "READYTORUN"
17 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/opensuse-42.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | final_message: "READYTORUN"
14 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/opensuse-42.2-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - 'zypper rr openSUSE-Leap-Cloud-Tools || :'
14 | final_message: "READYTORUN"
15 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/opensuse-42.3-user-data.txt:
--------------------------------------------------------------------------------
1 | opensuse-42.2-user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-12.1-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - zypper --non-interactive install --no-recommends python wget git ntp rsyslog lsb-release
14 | final_message: "READYTORUN"
15 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-12.2-user-data.txt:
--------------------------------------------------------------------------------
1 | sle-12.1-user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-12.3-user-data.txt:
--------------------------------------------------------------------------------
1 | sle-12.1-user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/sle-15.0-user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
4 | manage_etc_hosts: true
5 | preserve_hostname: true
6 | users:
7 | - name: ubuntu
8 | gecos: User
9 | sudo: ["ALL=(ALL) NOPASSWD:ALL"]
10 | groups: users
11 | runcmd:
12 | - ( MYHOME=/home/ubuntu ; mkdir $MYHOME/.ssh ; chmod 700 $MYHOME/.ssh ; cp /root/.ssh/authorized_keys $MYHOME/.ssh ; chown -R ubuntu.users $MYHOME/.ssh )
13 | - zypper --non-interactive --no-gpg-checks install --no-recommends wget git-core rsyslog lsb-release
14 | final_message: "READYTORUN"
15 |
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/ubuntu-12.04-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/ubuntu-14.04-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/ubuntu-16.04-user-data.txt:
--------------------------------------------------------------------------------
1 | user-data.txt
--------------------------------------------------------------------------------
/teuthology/task/buildpackages/user-data.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | bootcmd:
3 | - echo 'APT::Get::AllowUnauthenticated "true";' | tee /etc/apt/apt.conf.d/99disablesigs
4 | - echo nameserver 8.8.8.8 | tee -a /etc/resolv.conf # last resort, in case the DHCP server does not provide a resolver
5 | manage_etc_hosts: true
6 | preserve_hostname: true
7 | system_info:
8 | default_user:
9 | name: ubuntu
10 | final_message: "READYTORUN"
11 |
--------------------------------------------------------------------------------
/teuthology/task/dump_ctx.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import pprint
3 |
4 | log = logging.getLogger(__name__)
5 | pp = pprint.PrettyPrinter(indent=4)
6 |
7 | def _pprint_me(thing, prefix):
8 | return prefix + "\n" + pp.pformat(thing)
9 |
def task(ctx, config):
    """
    Dump task context and config in teuthology log/output

    The intended use case is didactic - to provide an easy way for newbies, who
    are working on teuthology tasks for the first time, to find out what
    is inside the ctx and config variables that are passed to each task.
    """
    # Log both objects in the same order as before: context, then config.
    for label, payload in (("Task context:", ctx), ("Task config:", config)):
        log.info(_pprint_me(payload, label))
20 |
--------------------------------------------------------------------------------
/teuthology/task/full_sequential.py:
--------------------------------------------------------------------------------
1 | """
2 | Task sequencer - full
3 | """
4 | import sys
5 | import logging
6 |
7 | from teuthology import run_tasks
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
def task(ctx, config):
    """
    Run a set of tasks to completion in order. __exit__ is called on a task
    before __enter__ on the next

    example::
        - full_sequential:
          - tasktest:
          - tasktest:

    :param ctx: Context
    :param config: Configuration — a list whose entries are either
        single-item dicts mapping a task name to its config, or bare
        names looked up in ctx.config
    """
    for entry in config:
        # A non-dict entry refers to a section defined at the top level
        # of the job's config (ctx.config); missing sections become {}.
        if not isinstance(entry, dict):
            entry = ctx.config.get(entry, {})
        # Each entry must be exactly one (taskname, config) pair.
        ((taskname, confg),) = entry.items()
        log.info('In full_sequential, running task %s...' % taskname)
        mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
        # Context-manager style tasks are entered and immediately exited,
        # so each task runs fully before the next one starts.
        if hasattr(mgr, '__enter__'):
            try:
                mgr.__enter__()
            finally:
                try:
                    # Capture any exception raised by __enter__ so it can
                    # be forwarded to __exit__.
                    exc_info = sys.exc_info()
                    mgr.__exit__(*exc_info)
                finally:
                    # Drop the traceback reference promptly to avoid
                    # keeping stack frames alive.
                    del exc_info
40 |
--------------------------------------------------------------------------------
/teuthology/task/full_sequential_finally.py:
--------------------------------------------------------------------------------
1 | """
2 | Task sequencer finally
3 | """
4 | import sys
5 | import logging
6 | import contextlib
7 |
8 | from teuthology import run_tasks
9 |
10 | log = logging.getLogger(__name__)
11 |
12 |
@contextlib.contextmanager
def task(ctx, config):
    """
    Sequentialize a group of tasks into one executable block, run on cleanup

    example::

        tasks:
        - foo:
        - full_sequential_finally:
          - final1:
          - final2:
        - bar:
        - baz:

    The final1 and final2 tasks will run when full_sequential_finally is torn
    down, after the nested bar and baz tasks have run to completion, and right
    before the preceding foo task is torn down. This is useful if there are
    additional steps you want to interject in a job during the shutdown (instead
    of startup) phase.

    :param ctx: Context
    :param config: Configuration — a list whose entries are either
        single-item dicts mapping a task name to its config, or bare
        names looked up in ctx.config
    """
    try:
        yield
    finally:
        # Everything below runs during teardown, after all nested tasks
        # have completed (or failed).
        for entry in config:
            # A non-dict entry refers to a section defined at the top
            # level of the job's config (ctx.config).
            if not isinstance(entry, dict):
                entry = ctx.config.get(entry, {})
            # Each entry must be exactly one (taskname, config) pair.
            ((taskname, confg),) = entry.items()
            log.info('In full_sequential_finally, running task %s...' % taskname)
            mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
            # Context-manager style tasks are entered and immediately
            # exited, so each runs fully before the next one starts.
            if hasattr(mgr, '__enter__'):
                try:
                    mgr.__enter__()
                finally:
                    try:
                        # Capture any exception raised by __enter__ so it
                        # can be forwarded to __exit__.
                        exc_info = sys.exc_info()
                        mgr.__exit__(*exc_info)
                    finally:
                        # Drop the traceback reference promptly to avoid
                        # keeping stack frames alive.
                        del exc_info
55 |
--------------------------------------------------------------------------------
/teuthology/task/install/bin/adjust-ulimits:
--------------------------------------------------------------------------------
#!/bin/sh
# Raise resource limits for the wrapped command, then exec it.
USER=$(whoami)

# From here on, a rejected ulimit call aborts the script.
set -e

# Only root is granted the large open-file limit.
if [ "$USER" = "root" ]; then
    ulimit -n 65536
fi

# Every user gets unlimited core dumps.
ulimit -c unlimited
exec "$@"
17 |
--------------------------------------------------------------------------------
/teuthology/task/install/packages.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ceph:
3 | deb:
4 | - ceph
5 | - ceph-mds
6 | - ceph-common
7 | - ceph-fuse
8 | - ceph-test
9 | - radosgw
10 | - python3-rados
11 | - python3-rgw
12 | - python3-cephfs
13 | - python3-rbd
14 | - libcephfs2
15 | - librados2
16 | - librbd1
17 | - rbd-fuse
18 | - ceph-dbg
19 | - ceph-mds-dbg
20 | - ceph-common-dbg
21 | - ceph-fuse-dbg
22 | - radosgw-dbg
23 | - libcephfs2-dbg
24 | - librados2-dbg
25 | - librbd1-dbg
26 | rpm:
27 | - ceph-radosgw
28 | - ceph-test
29 | - ceph
30 | - ceph-fuse
31 | - libcephfs2
32 | - librados2
33 | - librbd1
34 | - python3-rados
35 | - python3-rgw
36 | - python3-cephfs
37 | - python3-rbd
38 | - rbd-fuse
39 | - ceph-debuginfo
40 |
--------------------------------------------------------------------------------
/teuthology/task/interactive.py:
--------------------------------------------------------------------------------
1 | """
2 | Drop into a python shell
3 | """
4 | import code
5 | import readline
6 | import rlcompleter
7 | rlcompleter.__name__ # silence pyflakes
8 | import pprint
9 |
10 | readline.parse_and_bind('tab: complete')
11 |
def task(ctx, config):
    """
    Run an interactive Python shell, with the cluster accessible via
    the ``ctx`` variable.

    Hit ``control-D`` to continue.

    This is also useful to pause the execution of the test between two
    tasks, either to perform ad hoc operations, or to examine the
    state of the cluster. You can also use it to easily bring up a
    Ceph cluster for ad hoc testing.

    For example::

        tasks:
        - ceph:
        - interactive:
    """
    # Names exposed inside the interactive session: the task context,
    # its config, and a pretty-print helper.
    shell_locals = {
        'ctx': ctx,
        'config': config,
        'pp': pprint.PrettyPrinter().pprint,
    }
    code.interact(
        banner='Ceph test interactive mode, use ctx to interact with the cluster, press control-D to exit...',
        local=shell_locals,
    )
41 |
--------------------------------------------------------------------------------
/teuthology/task/internal/check_lock.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import teuthology.lock.query
4 | import teuthology.lock.util
5 |
6 | from teuthology.config import config as teuth_config
7 |
8 | log = logging.getLogger(__name__)
9 |
10 |
def check_lock(ctx, config, check_up=True):
    """
    Check lock status of remote machines.

    Verifies that each target is locked by the job owner and, when
    *check_up* is true, that it is marked up. Skipped entirely when no
    lock server is configured or the job disables 'check-locks'.
    """
    # Guard clause: nothing to do without a lock server or with
    # lock-checking explicitly disabled in the job config.
    if not teuth_config.lock_server or ctx.config.get('check-locks') is False:
        log.info('Lock checking disabled.')
        return
    log.info('Checking locks...')
    for target in ctx.config['targets'].keys():
        status = teuthology.lock.query.get_status(target)
        log.debug('machine status is %s', repr(status))
        assert status is not None, \
            'could not read lock status for {name}'.format(name=target)
        if check_up:
            assert status['up'], 'machine {name} is marked down'.format(
                name=target)
        assert status['locked'], \
            'machine {name} is not locked'.format(name=target)
        assert status['locked_by'] == ctx.owner, \
            'machine {name} is locked by {user}, not {owner}'.format(
                name=target,
                user=status['locked_by'],
                owner=ctx.owner,
            )
36 |
--------------------------------------------------------------------------------
/teuthology/task/internal/edit_sudoers.sh:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 |
3 | sudo vi -e /etc/sudoers <
2 |
3 | {% if job_id %}job {{ job_id }} {% endif %}performance data
4 |
5 | {% for metric in graphs.keys() %}
6 | {% if mode == 'static' %}
7 | {% set url = graphs[metric].file.split('/')[-1] %}
8 | {% else %}
9 | {% set url = graphs[metric].url %}
10 | {% endif %}
11 | {{ metric }}
12 |
13 |
14 | {% endfor %}
15 |