├── docs ├── .keep └── docsite │ └── links.yml ├── tests ├── unit │ ├── .keep │ ├── test_basic.py │ ├── modules │ │ ├── common.py │ │ ├── ca_test_common.py │ │ ├── test_radosgw_caps.py │ │ ├── test_ceph_fs.py │ │ ├── test_radosgw_realm.py │ │ ├── test_ceph_crush.py │ │ ├── test_radosgw_zonegroup.py │ │ ├── test_radosgw_user.py │ │ ├── test_ceph_dashboard_user.py │ │ ├── test_ceph_osd_flag.py │ │ └── test_ceph_volume_simple_scan.py │ └── module_utils │ │ ├── test_ceph_common.py │ │ └── test_ca_common.py ├── .gitignore ├── integration │ ├── __init__.py │ └── test_integration.py └── sanity │ ├── ignore-2.15.txt │ ├── ignore-2.16.txt │ ├── ignore-2.17.txt │ ├── ignore-2.18.txt │ ├── ignore-2.19.txt │ ├── ignore-devel.txt │ └── ignore-milestone.txt ├── roles └── run │ ├── files │ └── .keep │ ├── templates │ └── .keep │ ├── tests │ └── inventory │ ├── tasks │ └── main.yml │ ├── vars │ └── main.yml │ ├── defaults │ └── main.yml │ ├── handlers │ └── main.yml │ ├── meta │ └── main.yml │ └── README.md ├── plugins ├── action │ └── __init__.py ├── cache │ └── __init__.py ├── filter │ └── __init__.py ├── modules │ ├── __init__.py │ ├── ceph_crush_rule_info.py │ ├── ceph_mgr_module.py │ ├── ceph_osd_flag.py │ ├── ceph_osd.py │ ├── ceph_volume_simple_scan.py │ ├── ceph_config.py │ ├── cephadm_adopt.py │ ├── ceph_key_list.py │ ├── ceph_volume_simple_activate.py │ ├── ceph_orch_daemon.py │ ├── ceph_key_info.py │ └── ceph_authtool.py ├── test │ └── __init__.py ├── inventory │ └── __init__.py ├── module_utils │ ├── __init__.py │ ├── ceph_key_common.py │ ├── ceph_crush_rule_common.py │ └── ceph_common.py ├── plugin_utils │ └── __init__.py └── sub_plugins │ └── __init__.py ├── meta └── runtime.yml ├── MAINTAINERS ├── .vscode └── extensions.json ├── changelogs ├── fragments │ ├── 1_0_2.yml │ ├── 17-min_size_for_pool.yaml │ ├── ceph_orch_apply.yaml │ └── 12-ceph_orch_apply.yaml └── config.yaml ├── requirements.txt ├── CONTRIBUTING ├── tox-ansible.ini ├── .isort.cfg ├── test-requirements.txt ├── CODE_OF_CONDUCT.md ├── extensions ├── molecule │ ├── utils │ │ ├── playbooks │ │ │ ├── noop.yml │ │ │ └── converge.yml │ │ └── vars │ │ │ └── vars.yml │ └── integration_hello_world │ │ └── molecule.yml └── eda │ └── rulebooks │ └── rulebook.yml ├── pyproject.toml ├── .prettierignore ├── CHANGELOG.rst ├── .github ├── workflows │ ├── release.yml │ ├── changelog.yaml │ ├── build_import.yaml │ ├── release_galaxy.yaml │ ├── tests.yml │ ├── ansible_lint.yaml │ ├── sanity.yaml │ ├── unit.yaml │ └── unit_source.yml └── actions │ ├── add_tox_ansible │ └── action.yaml │ ├── ansible_validate_changelog │ └── action.yaml │ ├── identify_collection │ └── action.yml │ ├── checkout_dependency │ ├── README.md │ ├── action.yml │ ├── resolve_dependency.py │ └── test_resolve_dependency.py │ └── build_install_collection │ └── action.yml ├── devfile.yaml ├── .devcontainer ├── devcontainer.json ├── docker │ └── devcontainer.json └── podman │ └── devcontainer.json ├── .pre-commit-config.yaml ├── galaxy.yml ├── README.md └── .gitignore /docs/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/run/files/.keep: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /plugins/action/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/cache/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/filter/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/modules/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/run/templates/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/.gitignore: -------------------------------------------------------------------------------- 1 | output/ 2 | -------------------------------------------------------------------------------- /plugins/inventory/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/module_utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/plugin_utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/sub_plugins/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/integration/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/run/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /meta/runtime.yml: -------------------------------------------------------------------------------- 1 | --- 2 | requires_ansible: ">=2.15.0" 3 | -------------------------------------------------------------------------------- /roles/run/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for run 3 | -------------------------------------------------------------------------------- /roles/run/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for run 3 | -------------------------------------------------------------------------------- /roles/run/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for run 3 | 
--------------------------------------------------------------------------------
/roles/run/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for run
3 | 
--------------------------------------------------------------------------------
/MAINTAINERS:
--------------------------------------------------------------------------------
1 | Teoman ONAY
2 | Guillaume ABRIOUX
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 |   "recommendations": ["redhat.ansible"]
3 | }
4 | 
--------------------------------------------------------------------------------
/changelogs/fragments/1_0_2.yml:
--------------------------------------------------------------------------------
1 | major_changes:
2 |   - Import ceph-ansible modules and fix unit tests
3 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # TO-DO: add python packages that are required for this collection
2 | tox
3 | boto
4 | boto3
--------------------------------------------------------------------------------
/CONTRIBUTING:
--------------------------------------------------------------------------------
1 | # Contributing
2 | 
3 | Refer to the [Ansible community guide](https://docs.ansible.com/ansible/devel/community/index.html).
4 | 
--------------------------------------------------------------------------------
/tox-ansible.ini:
--------------------------------------------------------------------------------
1 | [ansible]
2 | 
3 | skip =
4 |     2.15
5 |     2.16
6 |     2.17
7 |     devel
8 |     milestone
9 |     py3.11
10 |     py3.12
--------------------------------------------------------------------------------
/.isort.cfg:
--------------------------------------------------------------------------------
1 | [settings]
2 | known_first_party=ansible_collections.ceph.automation
3 | line_length=100
4 | lines_after_imports=2
5 | lines_between_types=1
6 | profile=black
7 | 
--------------------------------------------------------------------------------
/test-requirements.txt:
--------------------------------------------------------------------------------
1 | # TO-DO: add python packages that are required for testing this collection
2 | pytest-ansible
3 | pytest-xdist
4 | molecule
5 | mock
6 | boto
7 | boto3
--------------------------------------------------------------------------------
/tests/unit/test_basic.py:
--------------------------------------------------------------------------------
1 | """Unit tests for ceph.automation"""
2 | 
3 | 
4 | def test_basic() -> None:
5 |     """Dummy unit test that always passes."""
6 |     assert True
7 | 
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Community Code of Conduct
2 | 
3 | Please see the official [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
4 | 
--------------------------------------------------------------------------------
/changelogs/fragments/17-min_size_for_pool.yaml:
--------------------------------------------------------------------------------
1 | minor_changes:
2 |   - ceph_pool - Added ability to change the 'min_size' option for Ceph pools.
(https://github.com/ceph/ceph.automation/pull/17). 3 | -------------------------------------------------------------------------------- /changelogs/fragments/ceph_orch_apply.yaml: -------------------------------------------------------------------------------- 1 | bugfixes: 2 | - ceph_orch_apply - The fix ensures that the existing orchestrator service is loaded when 3 | there are multiple YAML documents returning data for the same service. 4 | -------------------------------------------------------------------------------- /extensions/molecule/utils/playbooks/noop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: No-op 3 | hosts: localhost 4 | gather_facts: false 5 | tasks: 6 | - name: Run a noop 7 | ansible.builtin.debug: 8 | msg: "This does nothing!" 9 | -------------------------------------------------------------------------------- /extensions/molecule/utils/vars/vars.yml: -------------------------------------------------------------------------------- 1 | collection_root: "{{ lookup('pipe', 'git rev-parse --show-toplevel') }}" 2 | integration_tests_path: "{{ collection_root }}/tests/integration/targets/" 3 | molecule_scenario_name: "{{ molecule_scenario_directory | basename }}" 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 100 3 | 4 | [tool.pytest.ini_options] 5 | addopts = ["-vvv", "-n", "2", "--log-level", "WARNING", "--color", "yes"] 6 | filterwarnings = ['ignore:AnsibleCollectionFinder has already been configured'] 7 | testpaths = ["tests"] 8 | -------------------------------------------------------------------------------- /changelogs/fragments/12-ceph_orch_apply.yaml: -------------------------------------------------------------------------------- 1 | bugfixes: 2 | - > 3 | ceph_orch_apply - Fixed idempotency check for services that do not require the 'service_id' key in the specification 4 | and the 'host' service type (https://github.com/ceph/ceph.automation/pull/12). 5 | -------------------------------------------------------------------------------- /extensions/eda/rulebooks/rulebook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Hello Events 3 | hosts: localhost 4 | sources: 5 | - ansible.eda.range: 6 | limit: 5 7 | rules: 8 | - name: Say Hello 9 | condition: event.i == 1 10 | action: 11 | run_playbook: 12 | name: ansible.eda.hello 13 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # files we don't want prettier to ever to look into 2 | .*/ 3 | coverage/ 4 | 5 | # Environments 6 | .env 7 | .venv 8 | env/ 9 | venv/ 10 | ENV/ 11 | env.bak/ 12 | venv.bak/ 13 | 14 | # A linked collection directory created by pytest-ansible-units 15 | collections/ 16 | 17 | tests/output/ 18 | 19 | README.md 20 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | This should be updated by antsibull-changelog. Do not edit this manually! 2 | 3 | See https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelogs.rst for 4 | information on how to use antsibull-changelog. 
5 | 6 | Check out ``changelogs/config.yaml`` for its configuration. You need to change at least the ``title`` field in there. 7 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Release ceph.automation" 3 | 4 | on: # yamllint disable-line rule:truthy 5 | release: 6 | types: [published] 7 | 8 | jobs: 9 | release_automation_hub: 10 | uses: ./.github/workflows/release_galaxy.yaml 11 | with: 12 | environment: release 13 | secrets: 14 | ansible_galaxy_api_key: ${{ secrets.ANSIBLE_GALAXY_API_KEY }} 15 | -------------------------------------------------------------------------------- /plugins/module_utils/ceph_key_common.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | __metaclass__ = type 3 | 4 | 5 | def exec_commands(module, cmd_list): 6 | ''' 7 | Execute command(s) 8 | ''' 9 | 10 | for cmd in cmd_list: 11 | rc, out, err = module.run_command(cmd) 12 | if rc != 0: 13 | return rc, cmd, out, err 14 | 15 | return rc, cmd, out, err 16 | -------------------------------------------------------------------------------- /devfile.yaml: -------------------------------------------------------------------------------- 1 | schemaVersion: 2.2.2 2 | metadata: 3 | name: ansible-demo 4 | components: 5 | - name: tooling-container 6 | container: 7 | image: ghcr.io/ansible/ansible-workspace-env-reference:latest 8 | memoryRequest: 256M 9 | memoryLimit: 6Gi 10 | cpuRequest: 250m 11 | cpuLimit: 2000m 12 | args: ["tail", "-f", "/dev/null"] 13 | env: 14 | - name: KUBEDOCK_ENABLED 15 | value: "true" 16 | -------------------------------------------------------------------------------- /tests/integration/test_integration.py: -------------------------------------------------------------------------------- 1 | """Tests for molecule scenarios.""" 2 | 3 | from __future__ import absolute_import, division, print_function 4 | 5 | from pytest_ansible.molecule import MoleculeScenario 6 | 7 | 8 | def test_integration(molecule_scenario: MoleculeScenario) -> None: 9 | """Run molecule for each scenario. 
10 | 11 | :param molecule_scenario: The molecule scenario object 12 | """ 13 | proc = molecule_scenario.test() 14 | assert proc.returncode == 0 15 | -------------------------------------------------------------------------------- /extensions/molecule/integration_hello_world/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: na 4 | 5 | provisioner: 6 | name: ansible 7 | playbooks: 8 | cleanup: ../utils/playbooks/noop.yml 9 | converge: ../utils/playbooks/converge.yml 10 | destroy: ../utils/playbooks/noop.yml 11 | prepare: ../utils/playbooks/noop.yml 12 | config_options: 13 | defaults: 14 | collections_path: ${ANSIBLE_COLLECTIONS_PATH} 15 | scenario: 16 | test_sequence: 17 | - prepare 18 | - converge 19 | destroy_sequence: 20 | - destroy 21 | -------------------------------------------------------------------------------- /extensions/molecule/utils/playbooks/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Shared integration test runner 3 | hosts: localhost 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Load the vars 8 | ansible.builtin.include_vars: 9 | file: ../../utils/vars/vars.yml 10 | 11 | - name: "Integration test: {{ test_name }}" 12 | ansible.builtin.include_role: 13 | name: "{{ test_path }}" 14 | vars: 15 | test_path: "{{ integration_tests_path }}{{ test_name }}" 16 | test_name: "{{ molecule_scenario_name.replace('integration_', '') }}" 17 | -------------------------------------------------------------------------------- /.github/workflows/changelog.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Check changelog 3 | "on": 4 | workflow_call: null 5 | jobs: 6 | changelog: 7 | runs-on: ubuntu-latest 8 | name: Requires changelog 9 | if: "!contains(github.event.pull_request.labels.*.name, 'skip-changelog')" 10 | steps: 11 | - name: Checkout the collection repository 12 | uses: actions/checkout@v4 13 | with: 14 | ref: "${{ github.event.pull_request.head.sha }}" 15 | fetch-depth: "0" 16 | - name: Validate changelog 17 | uses: >- 18 | ./.github/actions/ansible_validate_changelog 19 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ansible-dev-container-codespaces", 3 | "image": "ghcr.io/ansible/community-ansible-dev-tools:latest", 4 | "containerUser": "podman", 5 | "runArgs": [ 6 | "--security-opt", 7 | "seccomp=unconfined", 8 | "--security-opt", 9 | "label=disable", 10 | "--cap-add=SYS_ADMIN", 11 | "--cap-add=SYS_RESOURCE", 12 | "--device", 13 | "/dev/fuse", 14 | "--security-opt", 15 | "apparmor=unconfined", 16 | "--hostname=ansible-dev-container" 17 | ], 18 | "updateRemoteUserUID": true, 19 | "customizations": { 20 | "vscode": { 21 | "extensions": ["redhat.ansible"] 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /.devcontainer/docker/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ansible-dev-container-docker", 3 | "image": "ghcr.io/ansible/community-ansible-dev-tools:latest", 4 | "containerUser": "podman", 5 | "runArgs": [ 6 | "--security-opt", 7 | "seccomp=unconfined", 8 | "--security-opt", 9 | "label=disable", 10 | "--cap-add=SYS_ADMIN", 11 | "--cap-add=SYS_RESOURCE", 12 | "--device", 13 | 
"/dev/fuse", 14 | "--security-opt", 15 | "apparmor=unconfined", 16 | "--hostname=ansible-dev-container" 17 | ], 18 | "updateRemoteUserUID": true, 19 | "customizations": { 20 | "vscode": { 21 | "extensions": ["redhat.ansible"] 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /.github/actions/add_tox_ansible/action.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Add tox ansible" 3 | description: Ensure a valid tox-ansible.ini is present in the repo. 4 | inputs: 5 | path: 6 | description: | 7 | Path to the collection to add tox-ansible.ini file. 8 | required: false 9 | default: . 10 | runs: 11 | using: composite 12 | steps: 13 | - name: Add file, if tox-ansible configuration file does not exist 14 | run: >- 15 | [ -e tox-ansible.ini ] || { echo "[ansible]" > tox-ansible.ini && echo 16 | -e "skip =\n\tpy3.7\n\tpy3.8\n\t2.9\n\t2.10\n\t2.11\n\t2.12\n\t2.13" >> 17 | tox-ansible.ini; } 18 | shell: bash 19 | working-directory: "${{ inputs.path }}" 20 | -------------------------------------------------------------------------------- /.devcontainer/podman/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ansible-dev-container-podman", 3 | "image": "ghcr.io/ansible/community-ansible-dev-tools:latest", 4 | "containerUser": "root", 5 | "runArgs": [ 6 | "--cap-add=SYS_ADMIN", 7 | "--cap-add=SYS_RESOURCE", 8 | "--device", 9 | "/dev/fuse", 10 | "--security-opt", 11 | "seccomp=unconfined", 12 | "--security-opt", 13 | "label=disable", 14 | "--security-opt", 15 | "apparmor=unconfined", 16 | "--userns=host", 17 | "--hostname=ansible-dev-container", 18 | "--volume", 19 | "ansible-dev-tools-container-storage:/var/lib/containers" 20 | ], 21 | "customizations": { 22 | "vscode": { 23 | "extensions": ["redhat.ansible"] 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /tests/unit/modules/common.py: -------------------------------------------------------------------------------- 1 | from ansible.module_utils import basic 2 | from ansible.module_utils._text import to_bytes 3 | import json 4 | 5 | 6 | def set_module_args(args): 7 | if '_ansible_remote_tmp' not in args: 8 | args['_ansible_remote_tmp'] = '/tmp' 9 | if '_ansible_keep_remote_files' not in args: 10 | args['_ansible_keep_remote_files'] = False 11 | 12 | args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) 13 | basic._ANSIBLE_ARGS = to_bytes(args) 14 | 15 | 16 | class AnsibleExitJson(Exception): 17 | pass 18 | 19 | 20 | class AnsibleFailJson(Exception): 21 | pass 22 | 23 | 24 | def exit_json(*args, **kwargs): 25 | raise AnsibleExitJson(kwargs) 26 | 27 | 28 | def fail_json(*args, **kwargs): 29 | raise AnsibleFailJson(kwargs) 30 | -------------------------------------------------------------------------------- /plugins/module_utils/ceph_crush_rule_common.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | __metaclass__ = type 3 | 4 | 5 | try: 6 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import generate_cmd 7 | except ImportError: 8 | from module_utils.ceph_common import generate_cmd 9 | 10 | 11 | def get_rule(module, container_image=None): 12 | ''' 13 | Get existing crush rule 14 | ''' 15 | 16 | cluster = module.params.get('cluster') 17 | name = module.params.get('name') 18 | 19 | args = 
['dump', name, '--format=json']
20 | 
21 |     cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'],
22 |                        args=args,
23 |                        cluster=cluster,
24 |                        container_image=container_image)
25 | 
26 |     return cmd
27 | 
--------------------------------------------------------------------------------
/tests/unit/modules/ca_test_common.py:
--------------------------------------------------------------------------------
1 | from ansible.module_utils import basic
2 | from ansible.module_utils._text import to_bytes
3 | import json
4 | 
5 | 
6 | def set_module_args(args):
7 |     if '_ansible_remote_tmp' not in args:
8 |         args['_ansible_remote_tmp'] = '/tmp'
9 |     if '_ansible_keep_remote_files' not in args:
10 |         args['_ansible_keep_remote_files'] = False
11 | 
12 |     args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
13 |     basic._ANSIBLE_ARGS = to_bytes(args)
14 |     basic._ANSIBLE_PROFILE = "legacy"
15 | 
16 | 
17 | class AnsibleExitJson(Exception):
18 |     pass
19 | 
20 | 
21 | class AnsibleFailJson(Exception):
22 |     pass
23 | 
24 | 
25 | def exit_json(*args, **kwargs):
26 |     raise AnsibleExitJson(kwargs)
27 | 
28 | 
29 | def fail_json(*args, **kwargs):
30 |     raise AnsibleFailJson(kwargs)
31 | 
--------------------------------------------------------------------------------
/.github/workflows/build_import.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Build and import collection
3 | 
4 | "on":
5 |   workflow_call: null
6 | 
7 | jobs:
8 |   build-import:
9 |     name: build-import-collection
10 |     runs-on: ubuntu-latest
11 |     steps:
12 |       - name: Checkout
13 |         uses: actions/checkout@v4
14 | 
15 |       - name: Ensure ansible-core and galaxy-importer are installed
16 |         shell: bash
17 |         run: |
18 |           python -m pip install ansible-core galaxy-importer
19 | 
20 |       - name: Update galaxy-importer cfg
21 |         shell: bash
22 |         run: |
23 |           printf "[galaxy-importer]\nCHECK_REQUIRED_TAGS=True\n" > /tmp/galaxy-importer.cfg
24 |           echo "GALAXY_IMPORTER_CONFIG=/tmp/galaxy-importer.cfg" >> "$GITHUB_ENV"
25 | 
26 |       - name: Build the collection tarball and run galaxy importer on it
27 |         shell: bash
28 |         run: |
29 |           python -m galaxy_importer.main --git-clone-path .
--output-path /tmp 30 | -------------------------------------------------------------------------------- /changelogs/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | changelog_filename_template: ../CHANGELOG.rst 3 | changelog_filename_version_depth: 0 4 | changes_file: changelog.yaml 5 | changes_format: combined 6 | keep_fragments: false 7 | mention_ancestor: true 8 | new_plugins_after_name: removed_features 9 | notesdir: fragments 10 | prelude_section_name: release_summary 11 | prelude_section_title: Release Summary 12 | flatmap: true 13 | sections: 14 | - - major_changes 15 | - Major Changes 16 | - - minor_changes 17 | - Minor Changes 18 | - - breaking_changes 19 | - Breaking Changes / Porting Guide 20 | - - deprecated_features 21 | - Deprecated Features 22 | - - removed_features 23 | - Removed Features (previously deprecated) 24 | - - security_fixes 25 | - Security Fixes 26 | - - bugfixes 27 | - Bugfixes 28 | - - known_issues 29 | - Known Issues 30 | - - doc_changes 31 | - Documentation Changes 32 | title: "Ceph Automation Collection" 33 | trivial_section_name: trivial 34 | -------------------------------------------------------------------------------- /.github/actions/ansible_validate_changelog/action.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: ansible_validate_changelog 3 | description: Ensure a valid changelog has been added to the pull request. 4 | inputs: 5 | path: 6 | description: | 7 | Path to the collection to validate changelog from. 8 | required: false 9 | default: . 10 | base_ref: 11 | description: The pull request base ref. 12 | required: false 13 | default: "${{ github.event.pull_request.base.ref }}" 14 | runs: 15 | using: composite 16 | steps: 17 | - name: Setup python 18 | uses: actions/setup-python@v4 19 | with: 20 | python-version: "3.11" 21 | - name: Install python dependencies 22 | run: | 23 | pip install -U pyyaml 24 | shell: bash 25 | - name: Validate changelog 26 | run: >- 27 | python3 ${{ github.action_path }}/validate_changelog.py --ref ${{ 28 | inputs.base_ref }} 29 | shell: bash 30 | working-directory: "${{ inputs.path }}" 31 | -------------------------------------------------------------------------------- /.github/workflows/release_galaxy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ansible Galaxy release 3 | "on": 4 | workflow_call: 5 | inputs: 6 | environment: 7 | description: The deployment environment 8 | required: true 9 | type: string 10 | galaxy_publish: 11 | default: true 12 | description: Publish the collection on galaxy 13 | type: boolean 14 | secrets: 15 | ansible_galaxy_api_key: 16 | required: false 17 | jobs: 18 | release: 19 | runs-on: ubuntu-latest 20 | name: Galaxy release 21 | environment: "${{ inputs.environment }}" 22 | steps: 23 | - uses: actions/checkout@v4 24 | - name: Build the collection 25 | run: | 26 | ansible-galaxy collection build -v --force 27 | - name: Publish the collection on Galaxy 28 | if: "${{ inputs.galaxy_publish }}" 29 | run: > 30 | [[ "${{ secrets.ansible_galaxy_api_key != '' }}" ]] || { echo 31 | "ansible_galaxy_api_key is required to publish on galaxy" ; exit 1; } 32 | 33 | TARBALL=$(ls -1 ./*.tar.gz) 34 | 35 | ansible-galaxy collection publish "${TARBALL}" --api-key "${{ 36 | secrets.ansible_galaxy_api_key }}" 37 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: 
--------------------------------------------------------------------------------
1 | ---
2 | repos:
3 |   - repo: https://github.com/ansible-network/collection_prep
4 |     rev: 1.1.1
5 |     hooks:
6 |       - id: update-docs
7 | 
8 |   - repo: https://github.com/pre-commit/pre-commit-hooks
9 |     rev: v4.4.0
10 |     hooks:
11 |       - id: check-merge-conflict
12 |       - id: check-symlinks
13 |       - id: debug-statements
14 |       - id: end-of-file-fixer
15 |       - id: no-commit-to-branch
16 |         args: [--branch, main]
17 |       - id: trailing-whitespace
18 | 
19 |   - repo: https://github.com/asottile/add-trailing-comma
20 |     rev: v3.0.0
21 |     hooks:
22 |       - id: add-trailing-comma
23 | 
24 |   - repo: https://github.com/pre-commit/mirrors-prettier
25 |     rev: "v3.0.0"
26 |     hooks:
27 |       - id: prettier
28 |         entry: env CI=1 bash -c "prettier --list-different . || ec=$? && prettier --loglevel=error --write . && exit $ec"
29 |         pass_filenames: false
30 |         args: []
31 |         additional_dependencies:
32 |           - prettier
33 |           - prettier-plugin-toml
34 | 
35 |   - repo: https://github.com/PyCQA/isort
36 |     rev: 5.12.0
37 |     hooks:
38 |       - id: isort
39 |         name: Sort import statements using isort
40 |         args: ["--filter-files"]
41 | 
42 |   - repo: https://github.com/psf/black
43 |     rev: 23.7.0
44 |     hooks:
45 |       - id: black
46 | 
47 |   - repo: https://github.com/pycqa/flake8
48 |     rev: 7.0.0
49 |     hooks:
50 |       - id: flake8
51 | 
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: "CI"
3 | 
4 | concurrency:
5 |   group: ${{ github.head_ref || github.run_id }}
6 |   cancel-in-progress: true
7 | 
8 | on: # yamllint disable-line rule:truthy
9 |   pull_request:
10 |     branches: [main]
11 |   workflow_dispatch:
12 |   schedule:
13 |     - cron: '0 0 * * *'
14 | 
15 | jobs:
16 |   changelog:
17 |     uses: ./.github/workflows/changelog.yaml
18 |     if: github.event_name == 'pull_request'
19 |   build-import:
20 |     uses: ./.github/workflows/build_import.yaml
21 |   ansible-lint:
22 |     uses: ./.github/workflows/ansible_lint.yaml
23 |   sanity:
24 |     uses: ./.github/workflows/sanity.yaml
25 |   unit-galaxy:
26 |     uses: ./.github/workflows/unit.yaml
27 |   unit-source:
28 |     uses: ./.github/workflows/unit_source.yml
29 |     with:
30 |       collection_pre_install: >-
31 |         git+https://github.com/ansible-collections/ansible.utils.git
32 |   all_green:
33 |     if: ${{ always() }}
34 |     needs:
35 |       - changelog
36 |       - build-import
37 |       - sanity
38 |       - unit-galaxy
39 |       - unit-source
40 |       - ansible-lint
41 |     runs-on: ubuntu-latest
42 |     steps:
43 |       - run: >-
44 |           python -c "assert 'failure' not in
45 |           set([
46 |           '${{ needs.changelog.result }}',
47 |           '${{ needs.sanity.result }}',
48 |           '${{ needs.unit-galaxy.result }}',
49 |           '${{ needs.ansible-lint.result }}',
50 |           '${{ needs.unit-source.result }}'
51 |           ])"
52 | 
--------------------------------------------------------------------------------
/.github/actions/identify_collection/action.yml:
--------------------------------------------------------------------------------
1 | name: Retrieve collection information
2 | description: Extract collection information from the galaxy.yml file
3 | 
4 | inputs:
5 |   source_path:
6 |     description: Path to the collection source root
7 |     required: true
8 | outputs:
9 |   tar_file:
10 |     description: The collection tarball when built
11 |     value: ${{ steps.keys.outputs.namespace }}-${{ steps.keys.outputs.name }}-${{ steps.keys.outputs.version }}.tar.gz
12 |   collection_path:
13 |     description: The final collection path
14 |     value:
/home/runner/collections/ansible_collections/${{ steps.keys.outputs.namespace }}/${{ steps.keys.outputs.name }} 15 | dependency: 16 | description: The collection dependency 17 | value: ${{ steps.keys.outputs.dependency }} 18 | 19 | runs: 20 | using: composite 21 | steps: 22 | - name: Github action python requirements 23 | run: pip3 install yq 24 | shell: bash 25 | 26 | - name: Extract metadata from galaxy.yml 27 | id: keys 28 | run: | 29 | echo "namespace=$(yq -r '.namespace' 'galaxy.yml')" >> $GITHUB_OUTPUT 30 | echo "name=$(yq -r '.name' 'galaxy.yml')" >> $GITHUB_OUTPUT 31 | echo "version=$(yq -r '.version' 'galaxy.yml')" >> $GITHUB_OUTPUT 32 | echo "dependency=$(yq -r '.dependencies // [] | keys | join(" ")' 'galaxy.yml')" >> $GITHUB_OUTPUT 33 | shell: bash 34 | working-directory: ${{ inputs.source_path }} 35 | 36 | - name: Github action python requirements 37 | run: pip3 uninstall yq -y 38 | shell: bash 39 | -------------------------------------------------------------------------------- /.github/actions/checkout_dependency/README.md: -------------------------------------------------------------------------------- 1 | # checkout_dependency 2 | 3 | This action checks-out your repository under the specified destination directory using the action actions/checkout. Use the `depends-On: repository/pull/xx` to override the reference to checkout. 4 | 5 | # Usage 6 | 7 | 8 | 9 | ```yaml 10 | - uses: ./.github/actions/checkout_dependency@main 11 | with: 12 | # Repository name with owner. For example, ansible-collections/kubernetes.core 13 | repository: "" 14 | 15 | # The branch, tag, or SHA to checkout when the pull request body does not 16 | # contain any override for this repository. 17 | ref: "" 18 | 19 | # Relative path under $GITHUB_WORKSPACE to place the repository 20 | path: "" 21 | 22 | # Number of commits to fetch. 0 indicates all history for all branches and tags. 
23 | fetch-depth: "1" 24 | ``` 25 | 26 | 27 | 28 | # Depending on others PRs 29 | 30 | The pull request body should contain the following sequence: 31 | 32 | ``` 33 | Depends-On: repository/pull/xx 34 | ``` 35 | 36 | # Scenarios 37 | 38 | - [checkout pull request 12345 from repository my_org/my_collection](#Checkout-depending-pull-request) 39 | 40 | ## Checkout depending pull request 41 | 42 | Github action step: 43 | 44 | ```yaml 45 | - uses: ./.github/actions/checkout_dependency@main 46 | with: 47 | repository: my_org/my_collection 48 | ref: main 49 | path: /path/to/checkout/repository 50 | ``` 51 | 52 | Pull request body: 53 | 54 | ```text 55 | Depends-On: https://github.com/my_org/my_collection/pull/12345 56 | ``` 57 | -------------------------------------------------------------------------------- /galaxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This collection is initialized by https://github.com/ansible/ansible-creator 24.5.0 3 | 4 | # See https://docs.ansible.com/ansible/latest/dev_guide/collections_galaxy_meta.html 5 | 6 | namespace: "ceph" 7 | name: "automation" 8 | version: 1.1.0 9 | readme: README.md 10 | authors: 11 | - Teoman ONAY 12 | 13 | description: Ceph automation modules 14 | license_file: LICENSE 15 | # TO-DO: update the tags based on your content type 16 | tags: ["linux", "tools"] 17 | dependencies: {} 18 | 19 | repository: https://github.com/ceph/ceph.automation 20 | documentation: https://docs.ceph.com/projects/ceph.automation 21 | homepage: https://github.com/ceph/ceph.automation 22 | issues: https://github.com/ceph/ceph.automation/issues 23 | 24 | # A list of file glob-like patterns used to filter any files or directories that should not be included in the build 25 | # artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This 26 | # uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry', 27 | # and '.git' are always filtered. Mutually exclusive with 'manifest' 28 | build_ignore: 29 | - .gitignore 30 | - changelogs/.plugin-cache.yaml 31 | - ".*" 32 | 33 | # A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a 34 | # list of MANIFEST.in style 35 | # L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key 36 | # 'omit_default_directives' is a boolean that controls whether the default directives are used. 
Mutually exclusive 37 | # with 'build_ignore' 38 | # manifest: null 39 | -------------------------------------------------------------------------------- /tests/unit/module_utils/test_ceph_common.py: -------------------------------------------------------------------------------- 1 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import build_base_cmd_orch, fatal 2 | import pytest 3 | from mock.mock import MagicMock 4 | 5 | 6 | class TestCephCommon(object): 7 | def setup_method(self): 8 | self.fake_module = MagicMock() 9 | self.fake_params = {'foo': 'bar'} 10 | self.fake_module.params = self.fake_params 11 | 12 | def test_build_base_cmd_orch_with_fsid_arg(self): 13 | expected_cmd = ['cephadm', 'shell', '--fsid', '123', 'ceph', 'orch'] 14 | self.fake_module.params = {'fsid': '123'} 15 | cmd = build_base_cmd_orch(self.fake_module) 16 | assert cmd == expected_cmd 17 | 18 | def test_build_base_cmd_orch_with_image_arg(self): 19 | expected_cmd = ['cephadm', '--image', 'quay.io/ceph-ci/ceph:main', 'shell', 'ceph', 'orch'] 20 | self.fake_module.params = {'image': 'quay.io/ceph-ci/ceph:main'} 21 | cmd = build_base_cmd_orch(self.fake_module) 22 | assert cmd == expected_cmd 23 | 24 | def test_build_base_cmd_orch_with_docker_arg(self): 25 | expected_cmd = ['cephadm', '--docker', 'shell', 'ceph', 'orch'] 26 | self.fake_module.params = {'docker': True} 27 | cmd = build_base_cmd_orch(self.fake_module) 28 | assert cmd == expected_cmd 29 | 30 | def test_build_base_cmd_orch_no_arg(self): 31 | expected_cmd = ['cephadm', 'shell', 'ceph', 'orch'] 32 | cmd = build_base_cmd_orch(self.fake_module) 33 | assert cmd == expected_cmd 34 | 35 | def test_fatal(self): 36 | fatal("error", self.fake_module) 37 | self.fake_module.fail_json.assert_called_with(msg='error', rc=1) 38 | with pytest.raises(Exception): 39 | fatal("error", False) 40 | -------------------------------------------------------------------------------- /roles/run/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: foo 3 | description: ceph.automation Run Role 4 | company: Ceph 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Choose a valid license ID from https://spdx.org - some suggested licenses: 11 | # - BSD-3-Clause (default) 12 | # - MIT 13 | # - GPL-2.0-or-later 14 | # - GPL-3.0-only 15 | # - Apache-2.0 16 | # - CC-BY-4.0 17 | license: GPL-2.0-or-later 18 | 19 | min_ansible_version: "2.14" 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # 25 | # Provide a list of supported platforms, and for each platform a list of versions. 26 | # If you don't wish to enumerate all versions for a particular platform, use 'all'. 27 | # To view available platforms and versions (or releases), visit: 28 | # https://galaxy.ansible.com/api/v1/platforms/ 29 | # 30 | # platforms: 31 | # - name: Fedora 32 | # versions: 33 | # - all 34 | # - 25 35 | # - name: SomePlatform 36 | # versions: 37 | # - all 38 | # - 1.0 39 | # - 7 40 | # - 99.99 41 | 42 | galaxy_tags: 43 | [] 44 | # List tags for your role here, one per line. A tag is a keyword that describes 45 | # and categorizes the role. Users find roles by searching for tags. Be sure to 46 | # remove the '[]' above, if you add tags to this list. 
47 | # 48 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 49 | # Maximum 20 tags per role. 50 | 51 | dependencies: 52 | [] 53 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 54 | # if you add dependencies to this list. 55 | -------------------------------------------------------------------------------- /.github/actions/checkout_dependency/action.yml: -------------------------------------------------------------------------------- 1 | name: "Checkout Dependency" 2 | description: | 3 | checkout repository and override commit based on keyword 'depends-on' from pull request message 4 | inputs: 5 | repository: 6 | description: "Repository name with owner. For example, ansible-collections/cloud.common" 7 | default: ${{ github.repository }} 8 | ref: 9 | description: "The default branch, tag or SHA to checkout if no reference (using depends-on) is found" 10 | path: 11 | description: "Relative path under $GITHUB_WORKSPACE to place the repository" 12 | fetch-depth: 13 | description: "Number of commits to fetch. 0 indicates all history for all branches and tags." 14 | default: "1" 15 | runs: 16 | using: composite 17 | steps: 18 | - name: Set up Python '3.12' 19 | uses: actions/setup-python@v4 20 | with: 21 | python-version: "3.12" 22 | 23 | - name: install PyGithub 24 | run: | 25 | pip install -U PyGithub 26 | shell: bash 27 | 28 | - id: resolve-dependency 29 | shell: bash 30 | run: | 31 | python ${{ github.action_path }}/resolve_dependency.py 32 | env: 33 | RESOLVE_REF_PR_BODY: ${{ github.event.pull_request.body }} 34 | RESOLVE_REF_REPOSITORY: ${{ inputs.repository }} 35 | 36 | - name: Display reference to checkout 37 | run: echo "Resolved reference -> '${RESOLVED_REF}'" 38 | shell: bash 39 | env: 40 | RESOLVED_REF: ${{ steps.resolve-dependency.outputs.merge_commit_sha }} 41 | 42 | - name: checkout repository 43 | uses: actions/checkout@v3 44 | with: 45 | repository: ${{ inputs.repository }} 46 | path: ${{ inputs.path }} 47 | ref: ${{ steps.resolve-dependency.outputs.merge_commit_sha || inputs.ref }} 48 | fetch-depth: ${{ inputs.fetch-depth }} 49 | -------------------------------------------------------------------------------- /.github/workflows/ansible_lint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ansible lint 3 | "on": 4 | workflow_call: 5 | inputs: 6 | args: 7 | description: Arguments to be passed to ansible-lint command. 8 | required: false 9 | default: "" 10 | type: string 11 | setup_python: 12 | description: If false, this action will not setup python and will instead rely on the already installed python. 13 | required: false 14 | default: true 15 | type: boolean 16 | working_directory: 17 | description: The directory where to run ansible-lint from. Default is `github.workspace`. 
18 | required: false 19 | default: "" 20 | type: string 21 | jobs: 22 | build: 23 | name: Ansible Lint 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v4 27 | - name: Process inputs 28 | id: inputs 29 | shell: bash 30 | run: | 31 | if [[ -n "${{ inputs.working_directory }}" ]]; then 32 | echo "working_directory=${{ inputs.working_directory }}" >> $GITHUB_OUTPUT 33 | else 34 | echo "working_directory=${{ github.workspace }}" >> $GITHUB_OUTPUT 35 | fi 36 | - name: Set up Python 37 | if: inputs.setup_python == 'true' 38 | uses: actions/setup-python@v5 39 | with: 40 | cache: pip 41 | cache-dependency-path: ${{ steps.inputs.outputs.working_directory }}/.git/ansible-lint-requirements.txt 42 | python-version: "3.11" 43 | - name: Install ansible-lint from pip 44 | shell: bash 45 | run: | 46 | pip install ansible-lint 47 | ansible-lint --version 48 | - name: Run ansible-lint 49 | shell: bash 50 | working-directory: ${{ steps.inputs.outputs.working_directory }} 51 | run: ansible-lint ${{ inputs.args }} 52 | -------------------------------------------------------------------------------- /roles/run/README.md: -------------------------------------------------------------------------------- 1 | ceph.automation Run Role 2 | ======================== 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | ```yaml 27 | - name: Execute tasks on servers 28 | hosts: servers 29 | roles: 30 | - role: ceph.automation.run 31 | run_x: 42 32 | ``` 33 | 34 | Another way to consume this role would be: 35 | 36 | ```yaml 37 | - name: Initialize the run role from ceph.automation 38 | hosts: servers 39 | gather_facts: false 40 | tasks: 41 | - name: Trigger invocation of run role 42 | ansible.builtin.include_role: 43 | name: ceph.automation.run 44 | vars: 45 | run_x: 42 46 | ``` 47 | 48 | License 49 | ------- 50 | 51 | # TO-DO: Update the license to the one you want to use (delete this line after setting the license) 52 | BSD 53 | 54 | Author Information 55 | ------------------ 56 | 57 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
58 | -------------------------------------------------------------------------------- /docs/docsite/links.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This will make sure that plugin and module documentation gets Edit on GitHub links 3 | # that allow users to directly create a PR for this plugin or module in GitHub's UI. 4 | # Remove this section if the collection repository is not on GitHub, or if you do not 5 | # want this functionality for your collection. 6 | edit_on_github: 7 | # TO-DO: Update this if your collection lives in a different GitHub organization. 8 | repository: ansible-collections/ceph.automation 9 | branch: main 10 | # If your collection root (the directory containing galaxy.yml) does not coincide with your 11 | # repository's root, you have to specify the path to the collection root here. For example, 12 | # if the collection root is in a subdirectory ansible_collections/community/REPO_NAME 13 | # in your repository, you have to set path_prefix to 'ansible_collections/community/REPO_NAME'. 14 | path_prefix: "" 15 | 16 | # Here you can add arbitrary extra links. Please keep the number of links down to a 17 | # minimum! Also please keep the description short, since this will be the text put on 18 | # a button. 19 | # 20 | # Also note that some links are automatically added from information in galaxy.yml. 21 | # The following are automatically added: 22 | # 1. A link to the issue tracker (if `issues` is specified); 23 | # 2. A link to the homepage (if `homepage` is specified and does not equal the 24 | # `documentation` or `repository` link); 25 | # 3. A link to the collection's repository (if `repository` is specified). 26 | 27 | extra_links: 28 | - description: Report an issue 29 | # TO-DO: Update this if your collection lives in a different GitHub organization. 30 | url: https://github.com/ansible-collections/ceph.automation/issues/new/choose 31 | 32 | # Specify communication channels for your collection. We suggest to not specify more 33 | # than one place for communication per communication tool to avoid confusion. 34 | communication: 35 | matrix_rooms: 36 | - topic: General usage and support questions 37 | room: "#users:ansible.im" 38 | irc_channels: 39 | - topic: General usage and support questions 40 | network: Libera 41 | channel: "#ansible" 42 | mailing_lists: 43 | - topic: Ansible Project List 44 | url: https://groups.google.com/g/ansible-project 45 | # You can also add a `subscribe` field with an URI that allows to subscribe 46 | # to the mailing list. For lists on https://groups.google.com/ a subscribe link is 47 | # automatically generated. 
48 | -------------------------------------------------------------------------------- /.github/workflows/sanity.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ansible sanity 3 | 4 | "on": 5 | workflow_call: 6 | inputs: 7 | extra_matrix_entries: 8 | required: false 9 | type: string 10 | jobs: 11 | tox-matrix: 12 | name: Matrix Sanity 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | with: 17 | ref: "${{github.event.pull_request.head.ref}}" 18 | repository: "${{ github.event.pull_request.head.repo.full_name }}" 19 | - name: Set up Python 20 | uses: actions/setup-python@v5 21 | with: 22 | python-version: "3.11" 23 | - name: "Install tox-ansible, includes tox" 24 | run: "python -m pip install tox-ansible" 25 | - name: "Check for tox-ansible.ini file, else add default" 26 | uses: ./.github/actions/add_tox_ansible 27 | - name: Generate matrix 28 | id: generate-matrix 29 | run: > 30 | python -m tox --ansible --gh-matrix --matrix-scope sanity --conf 31 | tox-ansible.ini 32 | - name: Merge matrix with extra entries 33 | id: merge-matrix 34 | run: | 35 | echo "Generated matrix: ${{ steps.generate-matrix.outputs.envlist }}" 36 | echo "Extra matrix: ${{ inputs.extra_matrix_entries }}" 37 | 38 | merged=$( 39 | jq -c -n \ 40 | --argjson a '${{ steps.generate-matrix.outputs.envlist }}' \ 41 | --argjson b '${{ inputs.extra_matrix_entries || '[]' }}' \ 42 | '$a + $b' 43 | ) 44 | echo "Merged matrix: $merged" 45 | echo "envlist=$merged" >> "$GITHUB_OUTPUT" 46 | outputs: 47 | envlist: "${{ steps.merge-matrix.outputs.envlist }}" 48 | test: 49 | needs: tox-matrix 50 | strategy: 51 | fail-fast: false 52 | matrix: 53 | entry: "${{ fromJson(needs.tox-matrix.outputs.envlist) }}" 54 | name: "${{ matrix.entry.name }}" 55 | runs-on: ubuntu-latest 56 | steps: 57 | - uses: actions/checkout@v4 58 | with: 59 | ref: "${{ github.event.pull_request.head.sha }}" 60 | fetch-depth: 0 61 | - name: Set up Python 62 | uses: actions/setup-python@v5 63 | with: 64 | python-version: "${{ matrix.entry.python }}" 65 | - name: "Install tox-ansible, includes tox" 66 | run: python -m pip install tox-ansible 67 | - name: "Check for tox-ansible.ini file, else add default" 68 | uses: ./.github/actions/add_tox_ansible 69 | - name: Run tox sanity tests 70 | run: >- 71 | python -m tox --ansible -e ${{ matrix.entry.name }} --conf 72 | tox-ansible.ini 73 | env: 74 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 75 | -------------------------------------------------------------------------------- /.github/actions/checkout_dependency/resolve_dependency.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """Script to check if a depends-on pull request has been defined into pull request body.""" 3 | 4 | import logging 5 | import os 6 | import re 7 | import sys 8 | 9 | from github import Github 10 | 11 | 12 | FORMAT = "[%(asctime)s] - %(message)s" 13 | logging.basicConfig(format=FORMAT) 14 | logger = logging.getLogger("resolve_dependency") 15 | logger.setLevel(logging.DEBUG) 16 | 17 | 18 | def get_pr_merge_commit_sha(repository: str, pr_number: int) -> str: 19 | """Retrieve pull request merge commit sha. 
20 | 21 | :param repository: The repository name 22 | :param pr_number: The pull request number 23 | :returns: The pull request merge commit sha if it exists 24 | :raises ValueError: if the pull request is not mergeable 25 | """ 26 | access_token = os.environ.get("GITHUB_TOKEN") 27 | gh_obj = Github(access_token) 28 | repo = gh_obj.get_repo(repository) 29 | 30 | pr_obj = repo.get_pull(pr_number) 31 | if not pr_obj.mergeable: 32 | # raise an error when the pull request is not mergeable 33 | sys.tracebacklimit = -1 34 | raise ValueError(f"Pull request {pr_number} from {repository} is not mergeable") 35 | 36 | return pr_obj.merge_commit_sha 37 | 38 | 39 | def resolve_ref(pr_body: str, repository: str) -> int: 40 | """Get pull request reference number defined with Depends-On. 41 | 42 | :param pr_body: the pull request body 43 | :param repository: The repository name 44 | :returns: pull request number if it is defined else 0 45 | """ 46 | pr_regx = re.compile( 47 | rf"^Depends-On:[ ]*https://github.com/{repository}/pull/(\d+)\s*$", 48 | re.MULTILINE | re.IGNORECASE, 49 | ) 50 | # Search for expression starting with depends-on not case-sensitive 51 | match = pr_regx.search(pr_body) 52 | return int(match.group(1)) if match else 0 53 | 54 | 55 | def main() -> None: 56 | """Run the script.""" 57 | pr_body = os.environ.get("RESOLVE_REF_PR_BODY") or "" 58 | repository = os.environ.get("RESOLVE_REF_REPOSITORY") or "" 59 | 60 | if not repository: 61 | return 62 | 63 | pr_number = resolve_ref(pr_body, repository) 64 | if not pr_number: 65 | return 66 | logger.info("Override checkout with pr number: %d", pr_number) 67 | 68 | # get pull request merge commit sha 69 | merge_commit_sha = get_pr_merge_commit_sha(repository, pr_number) 70 | logger.info("merge commit sha for pull request %d => '%s'", pr_number, merge_commit_sha) 71 | github_output = os.environ.get("GITHUB_OUTPUT") 72 | if github_output: 73 | with open(str(github_output), "a", encoding="utf-8") as file_handler: 74 | file_handler.write(f"merge_commit_sha={merge_commit_sha}\n") 75 | 76 | 77 | if __name__ == "__main__": 78 | main() 79 | -------------------------------------------------------------------------------- /tests/sanity/ignore-2.15.txt: -------------------------------------------------------------------------------- 1 | plugins/modules/ceph_add_users_buckets.py validate-modules:missing-gplv3-license # ignore license check 2 | plugins/modules/cephadm_adopt.py validate-modules:missing-gplv3-license # ignore license check 3 | plugins/modules/cephadm_bootstrap.py validate-modules:missing-gplv3-license # ignore license check 4 | plugins/modules/cephadm_registry_login.py validate-modules:missing-gplv3-license # ignore license check 5 | plugins/modules/ceph_authtool.py validate-modules:missing-gplv3-license # ignore license check 6 | plugins/modules/ceph_config.py validate-modules:missing-gplv3-license # ignore license check 7 | plugins/modules/ceph_crush.py validate-modules:missing-gplv3-license # ignore license check 8 | plugins/modules/ceph_crush_rule.py validate-modules:missing-gplv3-license # ignore license check 9 | plugins/modules/ceph_crush_rule_info.py validate-modules:missing-gplv3-license # ignore license check 10 | plugins/modules/ceph_dashboard_user.py validate-modules:missing-gplv3-license # ignore license check 11 | plugins/modules/ceph_ec_profile.py validate-modules:missing-gplv3-license # ignore license check 12 | plugins/modules/ceph_fs.py validate-modules:missing-gplv3-license # ignore license check 13 | 
plugins/modules/ceph_key.py validate-modules:missing-gplv3-license # ignore license check 14 | plugins/modules/ceph_key_info.py validate-modules:missing-gplv3-license # ignore license check 15 | plugins/modules/ceph_key_list.py validate-modules:missing-gplv3-license # ignore license check 16 | plugins/modules/ceph_mgr_module.py validate-modules:missing-gplv3-license # ignore license check 17 | plugins/modules/ceph_orch_apply.py validate-modules:missing-gplv3-license # ignore license check 18 | plugins/modules/ceph_orch_daemon.py validate-modules:missing-gplv3-license # ignore license check 19 | plugins/modules/ceph_orch_host.py validate-modules:missing-gplv3-license # ignore license check 20 | plugins/modules/ceph_osd_flag.py validate-modules:missing-gplv3-license # ignore license check 21 | plugins/modules/ceph_osd.py validate-modules:missing-gplv3-license # ignore license check 22 | plugins/modules/ceph_pool.py validate-modules:missing-gplv3-license # ignore license check 23 | plugins/modules/ceph_volume.py validate-modules:missing-gplv3-license # ignore license check 24 | plugins/modules/ceph_volume_simple_activate.py validate-modules:missing-gplv3-license # ignore license check 25 | plugins/modules/ceph_volume_simple_scan.py validate-modules:missing-gplv3-license # ignore license check 26 | plugins/modules/radosgw_caps.py validate-modules:missing-gplv3-license # ignore license check 27 | plugins/modules/radosgw_realm.py validate-modules:missing-gplv3-license # ignore license check 28 | plugins/modules/radosgw_user.py validate-modules:missing-gplv3-license # ignore license check 29 | plugins/modules/radosgw_zonegroup.py validate-modules:missing-gplv3-license # ignore license check 30 | plugins/modules/radosgw_zone.py validate-modules:missing-gplv3-license # ignore license check -------------------------------------------------------------------------------- /tests/sanity/ignore-2.16.txt: -------------------------------------------------------------------------------- 1 | plugins/modules/ceph_add_users_buckets.py validate-modules:missing-gplv3-license # ignore license check 2 | plugins/modules/cephadm_adopt.py validate-modules:missing-gplv3-license # ignore license check 3 | plugins/modules/cephadm_bootstrap.py validate-modules:missing-gplv3-license # ignore license check 4 | plugins/modules/cephadm_registry_login.py validate-modules:missing-gplv3-license # ignore license check 5 | plugins/modules/ceph_authtool.py validate-modules:missing-gplv3-license # ignore license check 6 | plugins/modules/ceph_config.py validate-modules:missing-gplv3-license # ignore license check 7 | plugins/modules/ceph_crush.py validate-modules:missing-gplv3-license # ignore license check 8 | plugins/modules/ceph_crush_rule.py validate-modules:missing-gplv3-license # ignore license check 9 | plugins/modules/ceph_crush_rule_info.py validate-modules:missing-gplv3-license # ignore license check 10 | plugins/modules/ceph_dashboard_user.py validate-modules:missing-gplv3-license # ignore license check 11 | plugins/modules/ceph_ec_profile.py validate-modules:missing-gplv3-license # ignore license check 12 | plugins/modules/ceph_fs.py validate-modules:missing-gplv3-license # ignore license check 13 | plugins/modules/ceph_key.py validate-modules:missing-gplv3-license # ignore license check 14 | plugins/modules/ceph_key_info.py validate-modules:missing-gplv3-license # ignore license check 15 | plugins/modules/ceph_key_list.py validate-modules:missing-gplv3-license # ignore license check 16 | 
plugins/modules/ceph_mgr_module.py validate-modules:missing-gplv3-license # ignore license check 17 | plugins/modules/ceph_orch_apply.py validate-modules:missing-gplv3-license # ignore license check 18 | plugins/modules/ceph_orch_daemon.py validate-modules:missing-gplv3-license # ignore license check 19 | plugins/modules/ceph_orch_host.py validate-modules:missing-gplv3-license # ignore license check 20 | plugins/modules/ceph_osd_flag.py validate-modules:missing-gplv3-license # ignore license check 21 | plugins/modules/ceph_osd.py validate-modules:missing-gplv3-license # ignore license check 22 | plugins/modules/ceph_pool.py validate-modules:missing-gplv3-license # ignore license check 23 | plugins/modules/ceph_volume.py validate-modules:missing-gplv3-license # ignore license check 24 | plugins/modules/ceph_volume_simple_activate.py validate-modules:missing-gplv3-license # ignore license check 25 | plugins/modules/ceph_volume_simple_scan.py validate-modules:missing-gplv3-license # ignore license check 26 | plugins/modules/radosgw_caps.py validate-modules:missing-gplv3-license # ignore license check 27 | plugins/modules/radosgw_realm.py validate-modules:missing-gplv3-license # ignore license check 28 | plugins/modules/radosgw_user.py validate-modules:missing-gplv3-license # ignore license check 29 | plugins/modules/radosgw_zonegroup.py validate-modules:missing-gplv3-license # ignore license check 30 | plugins/modules/radosgw_zone.py validate-modules:missing-gplv3-license # ignore license check -------------------------------------------------------------------------------- /tests/sanity/ignore-2.17.txt: -------------------------------------------------------------------------------- 1 | plugins/modules/ceph_add_users_buckets.py validate-modules:missing-gplv3-license # ignore license check 2 | plugins/modules/cephadm_adopt.py validate-modules:missing-gplv3-license # ignore license check 3 | plugins/modules/cephadm_bootstrap.py validate-modules:missing-gplv3-license # ignore license check 4 | plugins/modules/cephadm_registry_login.py validate-modules:missing-gplv3-license # ignore license check 5 | plugins/modules/ceph_authtool.py validate-modules:missing-gplv3-license # ignore license check 6 | plugins/modules/ceph_config.py validate-modules:missing-gplv3-license # ignore license check 7 | plugins/modules/ceph_crush.py validate-modules:missing-gplv3-license # ignore license check 8 | plugins/modules/ceph_crush_rule.py validate-modules:missing-gplv3-license # ignore license check 9 | plugins/modules/ceph_crush_rule_info.py validate-modules:missing-gplv3-license # ignore license check 10 | plugins/modules/ceph_dashboard_user.py validate-modules:missing-gplv3-license # ignore license check 11 | plugins/modules/ceph_ec_profile.py validate-modules:missing-gplv3-license # ignore license check 12 | plugins/modules/ceph_fs.py validate-modules:missing-gplv3-license # ignore license check 13 | plugins/modules/ceph_key.py validate-modules:missing-gplv3-license # ignore license check 14 | plugins/modules/ceph_key_info.py validate-modules:missing-gplv3-license # ignore license check 15 | plugins/modules/ceph_key_list.py validate-modules:missing-gplv3-license # ignore license check 16 | plugins/modules/ceph_mgr_module.py validate-modules:missing-gplv3-license # ignore license check 17 | plugins/modules/ceph_orch_apply.py validate-modules:missing-gplv3-license # ignore license check 18 | plugins/modules/ceph_orch_daemon.py validate-modules:missing-gplv3-license # ignore license check 19 | 
plugins/modules/ceph_orch_host.py validate-modules:missing-gplv3-license # ignore license check 20 | plugins/modules/ceph_osd_flag.py validate-modules:missing-gplv3-license # ignore license check 21 | plugins/modules/ceph_osd.py validate-modules:missing-gplv3-license # ignore license check 22 | plugins/modules/ceph_pool.py validate-modules:missing-gplv3-license # ignore license check 23 | plugins/modules/ceph_volume.py validate-modules:missing-gplv3-license # ignore license check 24 | plugins/modules/ceph_volume_simple_activate.py validate-modules:missing-gplv3-license # ignore license check 25 | plugins/modules/ceph_volume_simple_scan.py validate-modules:missing-gplv3-license # ignore license check 26 | plugins/modules/radosgw_caps.py validate-modules:missing-gplv3-license # ignore license check 27 | plugins/modules/radosgw_realm.py validate-modules:missing-gplv3-license # ignore license check 28 | plugins/modules/radosgw_user.py validate-modules:missing-gplv3-license # ignore license check 29 | plugins/modules/radosgw_zonegroup.py validate-modules:missing-gplv3-license # ignore license check 30 | plugins/modules/radosgw_zone.py validate-modules:missing-gplv3-license # ignore license check -------------------------------------------------------------------------------- /tests/sanity/ignore-2.18.txt: -------------------------------------------------------------------------------- 1 | plugins/modules/ceph_add_users_buckets.py validate-modules:missing-gplv3-license # ignore license check 2 | plugins/modules/cephadm_adopt.py validate-modules:missing-gplv3-license # ignore license check 3 | plugins/modules/cephadm_bootstrap.py validate-modules:missing-gplv3-license # ignore license check 4 | plugins/modules/cephadm_registry_login.py validate-modules:missing-gplv3-license # ignore license check 5 | plugins/modules/ceph_authtool.py validate-modules:missing-gplv3-license # ignore license check 6 | plugins/modules/ceph_config.py validate-modules:missing-gplv3-license # ignore license check 7 | plugins/modules/ceph_crush.py validate-modules:missing-gplv3-license # ignore license check 8 | plugins/modules/ceph_crush_rule.py validate-modules:missing-gplv3-license # ignore license check 9 | plugins/modules/ceph_crush_rule_info.py validate-modules:missing-gplv3-license # ignore license check 10 | plugins/modules/ceph_dashboard_user.py validate-modules:missing-gplv3-license # ignore license check 11 | plugins/modules/ceph_ec_profile.py validate-modules:missing-gplv3-license # ignore license check 12 | plugins/modules/ceph_fs.py validate-modules:missing-gplv3-license # ignore license check 13 | plugins/modules/ceph_key.py validate-modules:missing-gplv3-license # ignore license check 14 | plugins/modules/ceph_key_info.py validate-modules:missing-gplv3-license # ignore license check 15 | plugins/modules/ceph_key_list.py validate-modules:missing-gplv3-license # ignore license check 16 | plugins/modules/ceph_mgr_module.py validate-modules:missing-gplv3-license # ignore license check 17 | plugins/modules/ceph_orch_apply.py validate-modules:missing-gplv3-license # ignore license check 18 | plugins/modules/ceph_orch_daemon.py validate-modules:missing-gplv3-license # ignore license check 19 | plugins/modules/ceph_orch_host.py validate-modules:missing-gplv3-license # ignore license check 20 | plugins/modules/ceph_osd_flag.py validate-modules:missing-gplv3-license # ignore license check 21 | plugins/modules/ceph_osd.py validate-modules:missing-gplv3-license # ignore license check 22 | plugins/modules/ceph_pool.py 
validate-modules:missing-gplv3-license # ignore license check 23 | plugins/modules/ceph_volume.py validate-modules:missing-gplv3-license # ignore license check 24 | plugins/modules/ceph_volume_simple_activate.py validate-modules:missing-gplv3-license # ignore license check 25 | plugins/modules/ceph_volume_simple_scan.py validate-modules:missing-gplv3-license # ignore license check 26 | plugins/modules/radosgw_caps.py validate-modules:missing-gplv3-license # ignore license check 27 | plugins/modules/radosgw_realm.py validate-modules:missing-gplv3-license # ignore license check 28 | plugins/modules/radosgw_user.py validate-modules:missing-gplv3-license # ignore license check 29 | plugins/modules/radosgw_zonegroup.py validate-modules:missing-gplv3-license # ignore license check 30 | plugins/modules/radosgw_zone.py validate-modules:missing-gplv3-license # ignore license check -------------------------------------------------------------------------------- /tests/sanity/ignore-2.19.txt: -------------------------------------------------------------------------------- 1 | plugins/modules/ceph_add_users_buckets.py validate-modules:missing-gplv3-license # ignore license check 2 | plugins/modules/cephadm_adopt.py validate-modules:missing-gplv3-license # ignore license check 3 | plugins/modules/cephadm_bootstrap.py validate-modules:missing-gplv3-license # ignore license check 4 | plugins/modules/cephadm_registry_login.py validate-modules:missing-gplv3-license # ignore license check 5 | plugins/modules/ceph_authtool.py validate-modules:missing-gplv3-license # ignore license check 6 | plugins/modules/ceph_config.py validate-modules:missing-gplv3-license # ignore license check 7 | plugins/modules/ceph_crush.py validate-modules:missing-gplv3-license # ignore license check 8 | plugins/modules/ceph_crush_rule.py validate-modules:missing-gplv3-license # ignore license check 9 | plugins/modules/ceph_crush_rule_info.py validate-modules:missing-gplv3-license # ignore license check 10 | plugins/modules/ceph_dashboard_user.py validate-modules:missing-gplv3-license # ignore license check 11 | plugins/modules/ceph_ec_profile.py validate-modules:missing-gplv3-license # ignore license check 12 | plugins/modules/ceph_fs.py validate-modules:missing-gplv3-license # ignore license check 13 | plugins/modules/ceph_key.py validate-modules:missing-gplv3-license # ignore license check 14 | plugins/modules/ceph_key_info.py validate-modules:missing-gplv3-license # ignore license check 15 | plugins/modules/ceph_key_list.py validate-modules:missing-gplv3-license # ignore license check 16 | plugins/modules/ceph_mgr_module.py validate-modules:missing-gplv3-license # ignore license check 17 | plugins/modules/ceph_orch_apply.py validate-modules:missing-gplv3-license # ignore license check 18 | plugins/modules/ceph_orch_daemon.py validate-modules:missing-gplv3-license # ignore license check 19 | plugins/modules/ceph_orch_host.py validate-modules:missing-gplv3-license # ignore license check 20 | plugins/modules/ceph_osd_flag.py validate-modules:missing-gplv3-license # ignore license check 21 | plugins/modules/ceph_osd.py validate-modules:missing-gplv3-license # ignore license check 22 | plugins/modules/ceph_pool.py validate-modules:missing-gplv3-license # ignore license check 23 | plugins/modules/ceph_volume.py validate-modules:missing-gplv3-license # ignore license check 24 | plugins/modules/ceph_volume_simple_activate.py validate-modules:missing-gplv3-license # ignore license check 25 | plugins/modules/ceph_volume_simple_scan.py 
validate-modules:missing-gplv3-license # ignore license check 26 | plugins/modules/radosgw_caps.py validate-modules:missing-gplv3-license # ignore license check 27 | plugins/modules/radosgw_realm.py validate-modules:missing-gplv3-license # ignore license check 28 | plugins/modules/radosgw_user.py validate-modules:missing-gplv3-license # ignore license check 29 | plugins/modules/radosgw_zonegroup.py validate-modules:missing-gplv3-license # ignore license check 30 | plugins/modules/radosgw_zone.py validate-modules:missing-gplv3-license # ignore license check -------------------------------------------------------------------------------- /tests/sanity/ignore-devel.txt: -------------------------------------------------------------------------------- 1 | plugins/modules/ceph_add_users_buckets.py validate-modules:missing-gplv3-license # ignore license check 2 | plugins/modules/cephadm_adopt.py validate-modules:missing-gplv3-license # ignore license check 3 | plugins/modules/cephadm_bootstrap.py validate-modules:missing-gplv3-license # ignore license check 4 | plugins/modules/cephadm_registry_login.py validate-modules:missing-gplv3-license # ignore license check 5 | plugins/modules/ceph_authtool.py validate-modules:missing-gplv3-license # ignore license check 6 | plugins/modules/ceph_config.py validate-modules:missing-gplv3-license # ignore license check 7 | plugins/modules/ceph_crush.py validate-modules:missing-gplv3-license # ignore license check 8 | plugins/modules/ceph_crush_rule.py validate-modules:missing-gplv3-license # ignore license check 9 | plugins/modules/ceph_crush_rule_info.py validate-modules:missing-gplv3-license # ignore license check 10 | plugins/modules/ceph_dashboard_user.py validate-modules:missing-gplv3-license # ignore license check 11 | plugins/modules/ceph_ec_profile.py validate-modules:missing-gplv3-license # ignore license check 12 | plugins/modules/ceph_fs.py validate-modules:missing-gplv3-license # ignore license check 13 | plugins/modules/ceph_key.py validate-modules:missing-gplv3-license # ignore license check 14 | plugins/modules/ceph_key_info.py validate-modules:missing-gplv3-license # ignore license check 15 | plugins/modules/ceph_key_list.py validate-modules:missing-gplv3-license # ignore license check 16 | plugins/modules/ceph_mgr_module.py validate-modules:missing-gplv3-license # ignore license check 17 | plugins/modules/ceph_orch_apply.py validate-modules:missing-gplv3-license # ignore license check 18 | plugins/modules/ceph_orch_daemon.py validate-modules:missing-gplv3-license # ignore license check 19 | plugins/modules/ceph_orch_host.py validate-modules:missing-gplv3-license # ignore license check 20 | plugins/modules/ceph_osd_flag.py validate-modules:missing-gplv3-license # ignore license check 21 | plugins/modules/ceph_osd.py validate-modules:missing-gplv3-license # ignore license check 22 | plugins/modules/ceph_pool.py validate-modules:missing-gplv3-license # ignore license check 23 | plugins/modules/ceph_volume.py validate-modules:missing-gplv3-license # ignore license check 24 | plugins/modules/ceph_volume_simple_activate.py validate-modules:missing-gplv3-license # ignore license check 25 | plugins/modules/ceph_volume_simple_scan.py validate-modules:missing-gplv3-license # ignore license check 26 | plugins/modules/radosgw_caps.py validate-modules:missing-gplv3-license # ignore license check 27 | plugins/modules/radosgw_realm.py validate-modules:missing-gplv3-license # ignore license check 28 | plugins/modules/radosgw_user.py 
validate-modules:missing-gplv3-license # ignore license check 29 | plugins/modules/radosgw_zonegroup.py validate-modules:missing-gplv3-license # ignore license check 30 | plugins/modules/radosgw_zone.py validate-modules:missing-gplv3-license # ignore license check -------------------------------------------------------------------------------- /tests/sanity/ignore-milestone.txt: -------------------------------------------------------------------------------- 1 | plugins/modules/ceph_add_users_buckets.py validate-modules:missing-gplv3-license # ignore license check 2 | plugins/modules/cephadm_adopt.py validate-modules:missing-gplv3-license # ignore license check 3 | plugins/modules/cephadm_bootstrap.py validate-modules:missing-gplv3-license # ignore license check 4 | plugins/modules/cephadm_registry_login.py validate-modules:missing-gplv3-license # ignore license check 5 | plugins/modules/ceph_authtool.py validate-modules:missing-gplv3-license # ignore license check 6 | plugins/modules/ceph_config.py validate-modules:missing-gplv3-license # ignore license check 7 | plugins/modules/ceph_crush.py validate-modules:missing-gplv3-license # ignore license check 8 | plugins/modules/ceph_crush_rule.py validate-modules:missing-gplv3-license # ignore license check 9 | plugins/modules/ceph_crush_rule_info.py validate-modules:missing-gplv3-license # ignore license check 10 | plugins/modules/ceph_dashboard_user.py validate-modules:missing-gplv3-license # ignore license check 11 | plugins/modules/ceph_ec_profile.py validate-modules:missing-gplv3-license # ignore license check 12 | plugins/modules/ceph_fs.py validate-modules:missing-gplv3-license # ignore license check 13 | plugins/modules/ceph_key.py validate-modules:missing-gplv3-license # ignore license check 14 | plugins/modules/ceph_key_info.py validate-modules:missing-gplv3-license # ignore license check 15 | plugins/modules/ceph_key_list.py validate-modules:missing-gplv3-license # ignore license check 16 | plugins/modules/ceph_mgr_module.py validate-modules:missing-gplv3-license # ignore license check 17 | plugins/modules/ceph_orch_apply.py validate-modules:missing-gplv3-license # ignore license check 18 | plugins/modules/ceph_orch_daemon.py validate-modules:missing-gplv3-license # ignore license check 19 | plugins/modules/ceph_orch_host.py validate-modules:missing-gplv3-license # ignore license check 20 | plugins/modules/ceph_osd_flag.py validate-modules:missing-gplv3-license # ignore license check 21 | plugins/modules/ceph_osd.py validate-modules:missing-gplv3-license # ignore license check 22 | plugins/modules/ceph_pool.py validate-modules:missing-gplv3-license # ignore license check 23 | plugins/modules/ceph_volume.py validate-modules:missing-gplv3-license # ignore license check 24 | plugins/modules/ceph_volume_simple_activate.py validate-modules:missing-gplv3-license # ignore license check 25 | plugins/modules/ceph_volume_simple_scan.py validate-modules:missing-gplv3-license # ignore license check 26 | plugins/modules/radosgw_caps.py validate-modules:missing-gplv3-license # ignore license check 27 | plugins/modules/radosgw_realm.py validate-modules:missing-gplv3-license # ignore license check 28 | plugins/modules/radosgw_user.py validate-modules:missing-gplv3-license # ignore license check 29 | plugins/modules/radosgw_zonegroup.py validate-modules:missing-gplv3-license # ignore license check 30 | plugins/modules/radosgw_zone.py validate-modules:missing-gplv3-license # ignore license check 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ceph Automation Collection 2 | 3 | This repository contains the `ceph.automation` Ansible Collection. 4 | 5 | ## Tested with Ansible 6 | 7 | Tested with ansible-core >=2.15 releases and the current development version of ansible-core. 8 | 9 | ## External requirements 10 | 11 | Some modules and plugins require external libraries. Please check the requirements for each plugin or module you use in the documentation to find out which requirements are needed. 12 | 13 | ## Included content 14 | 15 | Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/ceph/automation). 16 | 17 | ## Using this collection 18 | 19 | ``` 20 | ansible-galaxy collection install ceph.automation 21 | ``` 22 | 23 | You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format: 24 | 25 | ```yaml 26 | collections: 27 | - name: ceph.automation 28 | ``` 29 | 30 | To upgrade the collection to the latest available version, run the following command: 31 | 32 | ```bash 33 | ansible-galaxy collection install ceph.automation --upgrade 34 | ``` 35 | 36 | You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/ceph/automation): 37 | 38 | ```bash 39 | ansible-galaxy collection install ceph.automation:==X.Y.Z 40 | ``` 41 | 42 | See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. 43 | 44 | ## Release notes 45 | 46 | See the [changelog](https://github.com/ceph/ceph.automation/blob/main/CHANGELOG.rst). 47 | 48 | ## Roadmap 49 | 50 | 51 | 52 | ## More information 53 | 54 | 55 | 56 | - [Ansible Collection overview](https://github.com/ansible-collections/overview) 57 | - [Ansible User guide](https://docs.ansible.com/ansible/devel/user_guide/index.html) 58 | - [Ansible Developer guide](https://docs.ansible.com/ansible/devel/dev_guide/index.html) 59 | - [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst) 60 | - [Ansible Community code of conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html) 61 | - [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420) 62 | - [News for Maintainers](https://github.com/ansible-collections/news-for-maintainers) 63 | 64 | ## Licensing 65 | 66 | Apache License, Version 2.0. 67 | 68 | See [LICENSE](http://www.apache.org/licenses/LICENSE-2.0) to see the full text. 
69 | -------------------------------------------------------------------------------- /.github/workflows/unit.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ansible Unit 3 | "on": 4 | workflow_call: 5 | inputs: 6 | collection_pre_install: 7 | required: false 8 | type: string 9 | default: "" 10 | extra_matrix_entries: 11 | required: false 12 | type: string 13 | jobs: 14 | tox-matrix: 15 | name: Matrix Unit 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | with: 20 | ref: "${{github.event.pull_request.head.ref}}" 21 | repository: "${{ github.event.pull_request.head.repo.full_name }}" 22 | - name: Set up Python 23 | uses: actions/setup-python@v5 24 | with: 25 | python-version: "3.11" 26 | - name: "Check for tox-ansible.ini file, else add default" 27 | uses: ./.github/actions/add_tox_ansible 28 | - name: "Install tox-ansible, includes tox" 29 | run: "python -m pip install tox-ansible" 30 | - name: Generate matrix 31 | id: generate-matrix 32 | run: > 33 | python -m tox --ansible --gh-matrix --matrix-scope unit --conf 34 | tox-ansible.ini 35 | - name: Merge matrix with extra entries 36 | id: merge-matrix 37 | run: | 38 | echo "Generated matrix: ${{ steps.generate-matrix.outputs.envlist }}" 39 | echo "Extra matrix: ${{ inputs.extra_matrix_entries }}" 40 | 41 | merged=$( 42 | jq -c -n \ 43 | --argjson a '${{ steps.generate-matrix.outputs.envlist }}' \ 44 | --argjson b '${{ inputs.extra_matrix_entries || '[]' }}' \ 45 | '$a + $b' 46 | ) 47 | echo "Merged matrix: $merged" 48 | echo "envlist=$merged" >> "$GITHUB_OUTPUT" 49 | outputs: 50 | envlist: "${{ steps.merge-matrix.outputs.envlist }}" 51 | test: 52 | needs: tox-matrix 53 | strategy: 54 | fail-fast: false 55 | matrix: 56 | entry: "${{ fromJson(needs.tox-matrix.outputs.envlist) }}" 57 | name: "${{ matrix.entry.name }}" 58 | runs-on: ubuntu-latest 59 | steps: 60 | - uses: actions/checkout@v4 61 | with: 62 | ref: "${{ github.event.pull_request.head.sha }}" 63 | fetch-depth: 0 64 | - name: Set up Python 65 | uses: actions/setup-python@v5 66 | with: 67 | python-version: "${{ matrix.entry.python }}" 68 | - name: "Install tox-ansible, includes tox" 69 | run: python -m pip install tox-ansible 70 | - name: "Check for tox-ansible.ini file, else add default" 71 | uses: ./.github/actions/add_tox_ansible 72 | - name: Install build toolchain and openssl headers on Linux 73 | shell: bash 74 | run: sudo apt update && sudo apt install build-essential libssl-dev 75 | if: ${{ matrix.entry.python >= 3.12 }} 76 | - name: Install catchsegv and libssh headers on Linux for cythonize+coverage 77 | shell: bash 78 | run: sudo apt update && sudo apt install libssh-dev 79 | if: ${{ matrix.entry.python >= 3.12 }} 80 | - name: Run tox unit tests 81 | run: >- 82 | python -m tox --ansible -e ${{ matrix.entry.name }} --conf 83 | tox-ansible.ini 84 | env: 85 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 86 | -------------------------------------------------------------------------------- /plugins/modules/ceph_crush_rule_info.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020, Red Hat, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from __future__ import absolute_import, division, print_function 16 | __metaclass__ = type 17 | 18 | 19 | ANSIBLE_METADATA = { 20 | 'metadata_version': '1.1', 21 | 'status': ['preview'], 22 | 'supported_by': 'community' 23 | } 24 | 25 | DOCUMENTATION = ''' 26 | --- 27 | module: ceph_crush_rule_info 28 | short_description: Lists Ceph Crush Replicated/Erasure Rules 29 | version_added: "1.1.0" 30 | description: 31 | - Retrieves Ceph Crush rule(s). 32 | options: 33 | name: 34 | description: 35 | - name of the Ceph Crush rule. An empty string can be provided as a value to get all crush rules 36 | type: str 37 | required: false 38 | cluster: 39 | description: 40 | - The ceph cluster name. 41 | type: str 42 | required: false 43 | default: ceph 44 | author: 45 | - Teoman ONAY (@asm0deuz) 46 | ''' 47 | 48 | EXAMPLES = ''' 49 | - name: get a Ceph Crush rule information 50 | ceph_crush_rule_info: 51 | name: foo 52 | ''' 53 | 54 | RETURN = '''# ''' 55 | 56 | from ansible.module_utils.basic import AnsibleModule 57 | try: 58 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module, \ 59 | is_containerized, \ 60 | exec_command 61 | except ImportError: 62 | from module_utils.ceph_common import exit_module, \ 63 | is_containerized, \ 64 | exec_command 65 | 66 | try: 67 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_crush_rule_common import get_rule 68 | except ImportError: 69 | from module_utils.ceph_crush_rule_common import get_rule 70 | 71 | import datetime 72 | 73 | 74 | def main(): 75 | module = AnsibleModule( 76 | argument_spec=dict( 77 | name=dict(type='str', required=False), 78 | cluster=dict(type='str', required=False, default='ceph'), 79 | ), 80 | supports_check_mode=True, 81 | ) 82 | 83 | if module.check_mode: 84 | module.exit_json( 85 | changed=False, 86 | stdout='', 87 | stderr='', 88 | rc=0, 89 | start='', 90 | end='', 91 | delta='', 92 | ) 93 | 94 | startd = datetime.datetime.now() 95 | changed = False 96 | 97 | # will return either the image name or None 98 | container_image = is_containerized() 99 | 100 | rc, cmd, out, err = exec_command(module, get_rule(module, container_image=container_image)) # noqa: E501 101 | 102 | exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 103 | 104 | 105 | if __name__ == '__main__': 106 | main() 107 | -------------------------------------------------------------------------------- /tests/unit/modules/test_radosgw_caps.py: -------------------------------------------------------------------------------- 1 | import os 2 | from mock.mock import patch, MagicMock 3 | import pytest 4 | 5 | # sys.path.append("./library") 6 | from ansible_collections.ceph.automation.plugins.modules import radosgw_caps 7 | 8 | 9 | fake_binary = "radosgw-admin" 10 | fake_cluster = "ceph" 11 | fake_container_binary = "podman" 12 | fake_container_image = "docker.io/ceph/daemon:latest" 13 | fake_container_cmd = [ 14 | fake_container_binary, 15 | "run", 16 | "--rm", 17 | "--net=host", 18 | "-v", 19 | 
"/etc/ceph:/etc/ceph:z", 20 | "-v", 21 | "/var/lib/ceph/:/var/lib/ceph/:z", 22 | "-v", 23 | "/var/log/ceph/:/var/log/ceph/:z", 24 | "--entrypoint=" + fake_binary, 25 | fake_container_image, 26 | ] 27 | fake_user = "foo" 28 | fake_caps = ["users=write", "zone=*", "metadata=read,write"] 29 | fake_params = { 30 | "cluster": fake_cluster, 31 | "name": fake_user, 32 | "caps": fake_caps, 33 | } 34 | 35 | 36 | class TestRadosgwCapsModule(object): 37 | @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary}) 38 | def test_container_exec(self): 39 | cmd = radosgw_caps.container_exec(fake_binary, fake_container_image) 40 | assert cmd == fake_container_cmd 41 | 42 | def test_not_is_containerized(self): 43 | assert radosgw_caps.is_containerized() is None 44 | 45 | @patch.dict(os.environ, {"CEPH_CONTAINER_IMAGE": fake_container_image}) 46 | def test_is_containerized(self): 47 | assert radosgw_caps.is_containerized() == fake_container_image 48 | 49 | @pytest.mark.parametrize("image", [None, fake_container_image]) 50 | @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary}) 51 | def test_pre_generate_radosgw_cmd(self, image): 52 | if image: 53 | expected_cmd = fake_container_cmd 54 | else: 55 | expected_cmd = [fake_binary] 56 | 57 | assert radosgw_caps.pre_generate_radosgw_cmd(image) == expected_cmd 58 | 59 | @pytest.mark.parametrize("image", [None, fake_container_image]) 60 | @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary}) 61 | def test_generate_radosgw_cmd(self, image): 62 | if image: 63 | expected_cmd = fake_container_cmd 64 | else: 65 | expected_cmd = [fake_binary] 66 | 67 | expected_cmd.extend(["--cluster", fake_cluster, "caps"]) 68 | assert ( 69 | radosgw_caps.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd 70 | ) 71 | 72 | def test_add_caps(self): 73 | fake_module = MagicMock() 74 | fake_module.params = fake_params 75 | expected_cmd = [ 76 | fake_binary, 77 | "--cluster", 78 | fake_cluster, 79 | "caps", 80 | "add", 81 | "--uid=" + fake_user, 82 | "--caps=" + ";".join(fake_caps), 83 | ] 84 | 85 | assert radosgw_caps.add_caps(fake_module) == expected_cmd 86 | 87 | def test_remove_caps(self): 88 | fake_module = MagicMock() 89 | fake_module.params = fake_params 90 | expected_cmd = [ 91 | fake_binary, 92 | "--cluster", 93 | fake_cluster, 94 | "caps", 95 | "rm", 96 | "--uid=" + fake_user, 97 | "--caps=" + ";".join(fake_caps), 98 | ] 99 | 100 | assert radosgw_caps.remove_caps(fake_module) == expected_cmd 101 | -------------------------------------------------------------------------------- /tests/unit/modules/test_ceph_fs.py: -------------------------------------------------------------------------------- 1 | from mock.mock import MagicMock 2 | from ansible_collections.ceph.automation.plugins.modules import ceph_fs 3 | 4 | 5 | fake_binary = 'ceph' 6 | fake_cluster = 'ceph' 7 | fake_container_binary = 'podman' 8 | fake_container_image = 'docker.io/ceph/daemon:latest' 9 | fake_container_cmd = [ 10 | fake_container_binary, 11 | 'run', 12 | '--rm', 13 | '--net=host', 14 | '-v', '/etc/ceph:/etc/ceph:z', 15 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 16 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 17 | '--entrypoint=' + fake_binary, 18 | fake_container_image 19 | ] 20 | fake_fs = 'foo' 21 | fake_data_pool = 'bar_data' 22 | fake_metadata_pool = 'bar_metadata' 23 | fake_max_mds = 2 24 | fake_params = {'cluster': fake_cluster, 25 | 'name': fake_fs, 26 | 'data': fake_data_pool, 27 | 'metadata': fake_metadata_pool, 28 | 'max_mds': fake_max_mds} 
29 | 30 | 31 | class TestCephFsModule(object): 32 | 33 | def test_create_fs(self): 34 | fake_module = MagicMock() 35 | fake_module.params = fake_params 36 | expected_cmd = [ 37 | fake_binary, 38 | '-n', 'client.admin', 39 | '-k', '/etc/ceph/ceph.client.admin.keyring', 40 | '--cluster', fake_cluster, 41 | 'fs', 'new', 42 | fake_fs, 43 | fake_metadata_pool, 44 | fake_data_pool 45 | ] 46 | 47 | assert ceph_fs.create_fs(fake_module) == expected_cmd 48 | 49 | def test_set_fs(self): 50 | fake_module = MagicMock() 51 | fake_module.params = fake_params 52 | expected_cmd = [ 53 | fake_binary, 54 | '-n', 'client.admin', 55 | '-k', '/etc/ceph/ceph.client.admin.keyring', 56 | '--cluster', fake_cluster, 57 | 'fs', 'set', 58 | fake_fs, 59 | 'max_mds', 60 | str(fake_max_mds) 61 | ] 62 | 63 | assert ceph_fs.set_fs(fake_module) == expected_cmd 64 | 65 | def test_get_fs(self): 66 | fake_module = MagicMock() 67 | fake_module.params = fake_params 68 | expected_cmd = [ 69 | fake_binary, 70 | '-n', 'client.admin', 71 | '-k', '/etc/ceph/ceph.client.admin.keyring', 72 | '--cluster', fake_cluster, 73 | 'fs', 'get', 74 | fake_fs, 75 | '--format=json' 76 | ] 77 | 78 | assert ceph_fs.get_fs(fake_module) == expected_cmd 79 | 80 | def test_remove_fs(self): 81 | fake_module = MagicMock() 82 | fake_module.params = fake_params 83 | expected_cmd = [ 84 | fake_binary, 85 | '-n', 'client.admin', 86 | '-k', '/etc/ceph/ceph.client.admin.keyring', 87 | '--cluster', fake_cluster, 88 | 'fs', 'rm', 89 | fake_fs, 90 | '--yes-i-really-mean-it' 91 | ] 92 | 93 | assert ceph_fs.remove_fs(fake_module) == expected_cmd 94 | 95 | def test_fail_fs(self): 96 | fake_module = MagicMock() 97 | fake_module.params = fake_params 98 | expected_cmd = [ 99 | fake_binary, 100 | '-n', 'client.admin', 101 | '-k', '/etc/ceph/ceph.client.admin.keyring', 102 | '--cluster', fake_cluster, 103 | 'fs', 'fail', 104 | fake_fs 105 | ] 106 | 107 | assert ceph_fs.fail_fs(fake_module) == expected_cmd 108 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # https://raw.githubusercontent.com/github/gitignore/main/Python.gitignore 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | cover/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # poetry 99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 100 | # This is especially recommended for binary packages to ensure reproducibility, and is more 101 | # commonly ignored for libraries. 102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 103 | #poetry.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 110 | # https://pdm.fming.dev/#use-with-ide 111 | .pdm.toml 112 | 113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 114 | __pypackages__/ 115 | 116 | # Celery stuff 117 | celerybeat-schedule 118 | celerybeat.pid 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | .dmypy.json 145 | dmypy.json 146 | 147 | # Pyre type checker 148 | .pyre/ 149 | 150 | # pytype static type analyzer 151 | .pytype/ 152 | 153 | # Cython debug symbols 154 | cython_debug/ 155 | 156 | # PyCharm 157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 159 | # and can be added to the global gitignore or merged into this file. For a more nuclear 160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
161 | #.idea/ 162 | -------------------------------------------------------------------------------- /.github/actions/build_install_collection/action.yml: -------------------------------------------------------------------------------- 1 | name: Build and install the collection 2 | description: Build and install the collection 3 | 4 | inputs: 5 | install_python_dependencies: 6 | description: "Install collection python dependencies" 7 | required: true 8 | source_path: 9 | description: "Path to the collection source" 10 | required: true 11 | collection_path: 12 | description: | 13 | The final collection path 14 | If not set, this will be determined by the action 15 | default: "" 16 | tar_file: 17 | description: | 18 | The collection tarball when built 19 | If not set, this will be determined by the action 20 | default: "" 21 | ansible_version: 22 | description: | 23 | Ansible Core version from the workflow. 24 | required: false 25 | default: "" 26 | outputs: 27 | collection_path: 28 | description: The final collection path 29 | value: ${{ inputs.collection_path || steps.identify.outputs.collection_path }} 30 | 31 | runs: 32 | using: composite 33 | steps: 34 | - name: Show the galaxy.yml 35 | run: cat galaxy.yml 36 | shell: bash 37 | working-directory: ${{ inputs.source_path }} 38 | 39 | - name: check if bindep.txt exists 40 | id: bindep_check 41 | shell: bash 42 | run: | 43 | if test -f "bindep.txt"; then 44 | echo "file_exists=true" >> $GITHUB_OUTPUT 45 | else 46 | echo "file_exists=false" >> $GITHUB_OUTPUT 47 | fi 48 | working-directory: ${{ inputs.source_path }} 49 | 50 | - name: Install bindep from pypi 51 | run: sudo python3 -m pip install bindep 52 | shell: bash 53 | if: steps.bindep_check.outputs.file_exists == 'true' 54 | 55 | - name: Install missing system packages using bindep.txt 56 | run: bindep test | tail -n +2 | xargs sudo apt-get -o Debug::pkgProblemResolver=true -o Debug::Acquire::http=true install -y || exit 0 57 | shell: bash 58 | working-directory: ${{ inputs.source_path }} 59 | if: steps.bindep_check.outputs.file_exists == 'true' 60 | 61 | - name: Check for missing system packages using bindep.txt 62 | run: bindep test 63 | shell: bash 64 | working-directory: ${{ inputs.source_path }} 65 | if: steps.bindep_check.outputs.file_exists == 'true' 66 | 67 | - name: Install collection python requirements 68 | if: ${{ inputs.install_python_dependencies == 'true' }} 69 | 70 | run: python3 -m pip install -r requirements.txt -r test-requirements.txt 71 | shell: bash 72 | working-directory: ${{ inputs.source_path }} 73 | 74 | - name: identify collection (final installation path and tarball name) 75 | id: identify 76 | uses: ./.github/actions/identify_collection 77 | with: 78 | source_path: ${{ inputs.source_path }} 79 | if: ${{ (inputs.collection_path == '') || (inputs.tar_file == '') }} 80 | 81 | - name: Build collection 82 | run: ansible-galaxy collection build -vvv 83 | shell: bash 84 | working-directory: ${{ inputs.source_path }} 85 | 86 | - name: Install collection and dependencies (with --pre flag) 87 | run: ansible-galaxy collection install ./${{ steps.identify.outputs.tar_file || inputs.tar_file }} --pre -p /home/runner/collections 88 | shell: bash 89 | working-directory: ${{ inputs.source_path }} 90 | if: ${{ inputs.ansible_version != 'stable-2.9' }} 91 | 92 | - name: Install collection and dependencies (without --pre flag) 93 | run: ansible-galaxy collection install ./${{ steps.identify.outputs.tar_file || inputs.tar_file }} -p /home/runner/collections 94 | shell: bash 95 
| working-directory: ${{ inputs.source_path }} 96 | if: ${{ inputs.ansible_version == 'stable-2.9' }} 97 | 98 | - name: Copy the galaxy.yml from source to destination, needed for pytest-ansible-units 99 | run: cp galaxy.yml ${{ steps.identify.outputs.collection_path || inputs.collection_path }}/galaxy.yml 100 | shell: bash 101 | working-directory: ${{ inputs.source_path }} 102 | -------------------------------------------------------------------------------- /plugins/modules/ceph_mgr_module.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright 2020, Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | 18 | from __future__ import absolute_import, division, print_function 19 | __metaclass__ = type 20 | 21 | ANSIBLE_METADATA = { 22 | 'metadata_version': '1.1', 23 | 'status': ['preview'], 24 | 'supported_by': 'community' 25 | } 26 | 27 | DOCUMENTATION = ''' 28 | --- 29 | module: ceph_mgr_module 30 | short_description: Manage Ceph MGR module 31 | version_added: "1.1.0" 32 | description: 33 | - Manage Ceph MGR module 34 | options: 35 | name: 36 | description: 37 | - name of the ceph MGR module. 38 | type: str 39 | required: true 40 | cluster: 41 | description: 42 | - The ceph cluster name. 43 | type: str 44 | required: false 45 | default: ceph 46 | state: 47 | description: 48 | - If 'enable' is used, the module enables the MGR module. If 'disable' is used, the module disables the MGR module.
49 | type: str 50 | required: false 51 | choices: ['enable', 'disable'] 52 | default: enable 53 | author: 54 | - Dimitri Savineau (@dsavineau) 55 | ''' 56 | 57 | EXAMPLES = ''' 58 | - name: enable dashboard mgr module 59 | ceph_mgr_module: 60 | name: dashboard 61 | state: enable 62 | 63 | - name: disable multiple mgr modules 64 | ceph_mgr_module: 65 | name: '{{ item }}' 66 | state: disable 67 | loop: 68 | - 'dashboard' 69 | - 'prometheus' 70 | ''' 71 | 72 | RETURN = '''# ''' 73 | 74 | from ansible.module_utils.basic import AnsibleModule 75 | try: 76 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module, \ 77 | generate_cmd, \ 78 | is_containerized 79 | except ImportError: 80 | from module_utils.ceph_common import exit_module, \ 81 | generate_cmd, \ 82 | is_containerized 83 | 84 | import datetime 85 | 86 | 87 | def main(): 88 | module = AnsibleModule( 89 | argument_spec=dict( 90 | name=dict(type='str', required=True), 91 | cluster=dict(type='str', required=False, default='ceph'), 92 | state=dict(type='str', required=False, default='enable', choices=['enable', 'disable']), # noqa: E501 93 | ), 94 | supports_check_mode=True, 95 | ) 96 | 97 | name = module.params.get('name') 98 | cluster = module.params.get('cluster') 99 | state = module.params.get('state') 100 | 101 | startd = datetime.datetime.now() 102 | 103 | container_image = is_containerized() 104 | 105 | cmd = generate_cmd(sub_cmd=['mgr', 'module'], 106 | args=[state, name], 107 | cluster=cluster, 108 | container_image=container_image) 109 | 110 | if module.check_mode: 111 | exit_module( 112 | module=module, 113 | out='', 114 | rc=0, 115 | cmd=cmd, 116 | err='', 117 | startd=startd, 118 | changed=False 119 | ) 120 | else: 121 | rc, out, err = module.run_command(cmd) 122 | if 'is already enabled' in err: 123 | changed = False 124 | else: 125 | changed = True 126 | exit_module( 127 | module=module, 128 | out=out, 129 | rc=rc, 130 | cmd=cmd, 131 | err=err, 132 | startd=startd, 133 | changed=changed 134 | ) 135 | 136 | 137 | if __name__ == '__main__': 138 | main() 139 | -------------------------------------------------------------------------------- /plugins/modules/ceph_osd_flag.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright 2020, Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | 18 | from __future__ import absolute_import, division, print_function 19 | __metaclass__ = type 20 | 21 | ANSIBLE_METADATA = { 22 | 'metadata_version': '1.1', 23 | 'status': ['preview'], 24 | 'supported_by': 'community' 25 | } 26 | 27 | DOCUMENTATION = ''' 28 | --- 29 | module: ceph_osd_flag 30 | short_description: Manage Ceph OSD flag 31 | version_added: "1.1.0" 32 | description: 33 | - Manage Ceph OSD flag 34 | options: 35 | name: 36 | description: 37 | - name of the ceph OSD flag.
38 | type: str 39 | required: true 40 | choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub'] 41 | cluster: 42 | description: 43 | - The ceph cluster name. 44 | type: str 45 | required: false 46 | default: ceph 47 | state: 48 | description: 49 | - If 'present' is used, the module sets the OSD flag. If 'absent' is used, the module will unset the OSD flag. 50 | type: str 51 | required: false 52 | choices: ['present', 'absent'] 53 | default: present 54 | author: 55 | - Dimitri Savineau (@dsavineau) 56 | ''' 57 | 58 | EXAMPLES = ''' 59 | - name: set noup OSD flag 60 | ceph_osd_flag: 61 | name: noup 62 | 63 | - name: unset multiple OSD flags 64 | ceph_osd_flag: 65 | name: '{{ item }}' 66 | state: absent 67 | loop: 68 | - 'noup' 69 | - 'norebalance' 70 | ''' 71 | 72 | RETURN = '''# ''' 73 | 74 | from ansible.module_utils.basic import AnsibleModule 75 | try: 76 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module, \ 77 | generate_cmd, \ 78 | is_containerized 79 | except ImportError: 80 | from module_utils.ceph_common import exit_module, \ 81 | generate_cmd, \ 82 | is_containerized 83 | 84 | import datetime 85 | 86 | 87 | def main(): 88 | module = AnsibleModule( 89 | argument_spec=dict( 90 | name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']), # noqa: E501 91 | cluster=dict(type='str', required=False, default='ceph'), 92 | state=dict(type='str', required=False, default='present', choices=['present', 'absent']), # noqa: E501 93 | ), 94 | supports_check_mode=True, 95 | ) 96 | 97 | name = module.params.get('name') 98 | cluster = module.params.get('cluster') 99 | state = module.params.get('state') 100 | 101 | startd = datetime.datetime.now() 102 | 103 | container_image = is_containerized() 104 | 105 | if state == 'present': 106 | cmd = generate_cmd(sub_cmd=['osd', 'set'], args=[name], cluster=cluster, container_image=container_image) # noqa: E501 107 | else: 108 | cmd = generate_cmd(sub_cmd=['osd', 'unset'], args=[name], cluster=cluster, container_image=container_image) # noqa: E501 109 | 110 | if module.check_mode: 111 | exit_module( 112 | module=module, 113 | out='', 114 | rc=0, 115 | cmd=cmd, 116 | err='', 117 | startd=startd, 118 | changed=False 119 | ) 120 | else: 121 | rc, out, err = module.run_command(cmd) 122 | exit_module( 123 | module=module, 124 | out=out, 125 | rc=rc, 126 | cmd=cmd, 127 | err=err, 128 | startd=startd, 129 | changed=True 130 | ) 131 | 132 | 133 | if __name__ == '__main__': 134 | main() 135 | -------------------------------------------------------------------------------- /plugins/modules/ceph_osd.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright 2020, Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | 18 | from __future__ import absolute_import, division, print_function 19 | __metaclass__ = type 20 | 21 | ANSIBLE_METADATA = { 22 | 'metadata_version': '1.1', 23 | 'status': ['preview'], 24 | 'supported_by': 'community' 25 | } 26 | 27 | DOCUMENTATION = ''' 28 | --- 29 | module: ceph_osd 30 | short_description: Manage Ceph OSD state 31 | version_added: "1.1.0" 32 | description: 33 | - Manage Ceph OSD state 34 | options: 35 | ids: 36 | description: 37 | - The ceph OSD id(s). 38 | type: list 39 | elements: int 40 | required: true 41 | cluster: 42 | description: 43 | - The ceph cluster name. 44 | type: str 45 | required: false 46 | default: ceph 47 | state: 48 | description: 49 | - The ceph OSD state. 50 | type: str 51 | required: true 52 | choices: ['destroy', 'down', 'in', 'out', 'purge', 'rm'] 53 | author: 54 | - Dimitri Savineau (@dsavineau) 55 | ''' 56 | 57 | EXAMPLES = ''' 58 | - name: destroy OSD 42 59 | ceph_osd: 60 | ids: 42 61 | state: destroy 62 | 63 | - name: set multiple OSDs down 64 | ceph_osd: 65 | ids: [0, 1, 3] 66 | state: down 67 | 68 | - name: set OSD 42 in 69 | ceph_osd: 70 | ids: 42 71 | state: in 72 | 73 | - name: set OSD 42 out 74 | ceph_osd: 75 | ids: 42 76 | state: out 77 | 78 | - name: purge OSD 42 79 | ceph_osd: 80 | ids: 42 81 | state: purge 82 | 83 | - name: rm OSD 42 84 | ceph_osd: 85 | ids: 42 86 | state: rm 87 | ''' 88 | 89 | RETURN = '''# ''' 90 | 91 | from ansible.module_utils.basic import AnsibleModule 92 | try: 93 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module, generate_cmd, is_containerized # noqa: E501 94 | except ImportError: 95 | from module_utils.ceph_common import exit_module, generate_cmd, is_containerized # noqa: E501 96 | import datetime 97 | 98 | 99 | def main(): 100 | module = AnsibleModule( 101 | argument_spec=dict( 102 | ids=dict(type='list', elements='int', required=True), 103 | cluster=dict(type='str', required=False, default='ceph'), 104 | state=dict(type='str', required=True, choices=['destroy', 'down', 'in', 'out', 'purge', 'rm']), # noqa: E501 105 | ), 106 | supports_check_mode=True, 107 | ) 108 | 109 | ids = module.params.get('ids') 110 | cluster = module.params.get('cluster') 111 | state = module.params.get('state') 112 | 113 | if state in ['destroy', 'purge'] and len(ids) > 1: 114 | module.fail_json(msg='destroy and purge only support one OSD at a time', rc=1) # noqa: E501 115 | 116 | startd = datetime.datetime.now() 117 | 118 | container_image = is_containerized() 119 | 120 | cmd = generate_cmd(sub_cmd=['osd', state], args=ids, cluster=cluster, container_image=container_image) # noqa: E501 121 | 122 | if state in ['destroy', 'purge']: 123 | cmd.append('--yes-i-really-mean-it') 124 | 125 | if module.check_mode: 126 | exit_module( 127 | module=module, 128 | out='', 129 | rc=0, 130 | cmd=cmd, 131 | err='', 132 | startd=startd, 133 | changed=False 134 | ) 135 | else: 136 | rc, out, err = module.run_command(cmd) 137 | changed = True 138 | if state in ['down', 'in', 'out'] and 'marked' not in err: 139 | changed = False 140 | exit_module( 141 | module=module, 142 | out=out, 143 | rc=rc, 144 | cmd=cmd, 145 | err=err, 146 | startd=startd, 147 | changed=changed 148 | ) 149 | 150 | 151 | if __name__ == '__main__': 152 | main() 153 | -------------------------------------------------------------------------------- /tests/unit/modules/test_radosgw_realm.py: -------------------------------------------------------------------------------- 1 | import os 2 | from mock.mock import patch, MagicMock 3 | 
import pytest 4 | # sys.path.append('./library') 5 | from ansible_collections.ceph.automation.plugins.modules import radosgw_realm 6 | 7 | 8 | fake_binary = 'radosgw-admin' 9 | fake_cluster = 'ceph' 10 | fake_container_binary = 'podman' 11 | fake_container_image = 'docker.io/ceph/daemon:latest' 12 | fake_container_cmd = [ 13 | fake_container_binary, 14 | 'run', 15 | '--rm', 16 | '--net=host', 17 | '-v', '/etc/ceph:/etc/ceph:z', 18 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 19 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 20 | '--entrypoint=' + fake_binary, 21 | fake_container_image 22 | ] 23 | fake_realm = 'foo' 24 | fake_params = {'cluster': fake_cluster, 25 | 'name': fake_realm, 26 | 'default': True} 27 | fake_url = 'http://192.168.42.100:8080' 28 | fake_access_key = '8XQHmFxixz7LCM2AdM2p' 29 | fake_secret_key = 'XC8IhEPJprL6SrpaJDmolVs7jbOvoe2E3AaWKGRx' 30 | 31 | 32 | class TestRadosgwRealmModule(object): 33 | 34 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 35 | def test_container_exec(self): 36 | cmd = radosgw_realm.container_exec(fake_binary, fake_container_image) 37 | assert cmd == fake_container_cmd 38 | 39 | def test_not_is_containerized(self): 40 | assert radosgw_realm.is_containerized() is None 41 | 42 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 43 | def test_is_containerized(self): 44 | assert radosgw_realm.is_containerized() == fake_container_image 45 | 46 | @pytest.mark.parametrize('image', [None, fake_container_image]) 47 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 48 | def test_pre_generate_radosgw_cmd(self, image): 49 | if image: 50 | expected_cmd = fake_container_cmd 51 | else: 52 | expected_cmd = [fake_binary] 53 | 54 | assert radosgw_realm.pre_generate_radosgw_cmd(image) == expected_cmd 55 | 56 | @pytest.mark.parametrize('image', [None, fake_container_image]) 57 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 58 | def test_generate_radosgw_cmd(self, image): 59 | if image: 60 | expected_cmd = fake_container_cmd 61 | else: 62 | expected_cmd = [fake_binary] 63 | 64 | expected_cmd.extend([ 65 | '--cluster', 66 | fake_cluster, 67 | 'realm' 68 | ]) 69 | assert radosgw_realm.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd 70 | 71 | def test_create_realm(self): 72 | fake_module = MagicMock() 73 | fake_module.params = fake_params 74 | expected_cmd = [ 75 | fake_binary, 76 | '--cluster', fake_cluster, 77 | 'realm', 'create', 78 | '--rgw-realm=' + fake_realm, 79 | '--default' 80 | ] 81 | 82 | assert radosgw_realm.create_realm(fake_module) == expected_cmd 83 | 84 | def test_get_realm(self): 85 | fake_module = MagicMock() 86 | fake_module.params = fake_params 87 | expected_cmd = [ 88 | fake_binary, 89 | '--cluster', fake_cluster, 90 | 'realm', 'get', 91 | '--rgw-realm=' + fake_realm, 92 | '--format=json' 93 | ] 94 | 95 | assert radosgw_realm.get_realm(fake_module) == expected_cmd 96 | 97 | def test_remove_realm(self): 98 | fake_module = MagicMock() 99 | fake_module.params = fake_params 100 | expected_cmd = [ 101 | fake_binary, 102 | '--cluster', fake_cluster, 103 | 'realm', 'delete', 104 | '--rgw-realm=' + fake_realm 105 | ] 106 | 107 | assert radosgw_realm.remove_realm(fake_module) == expected_cmd 108 | 109 | def test_pull_realm(self): 110 | fake_module = MagicMock() 111 | fake_params.update({'url': fake_url, 'access_key': fake_access_key, 'secret_key': fake_secret_key}) 112 | fake_module.params = fake_params 113 | expected_cmd = [ 114 | fake_binary, 
115 | '--cluster', fake_cluster, 116 | 'realm', 'pull', 117 | '--rgw-realm=' + fake_realm, 118 | '--url=' + fake_url, 119 | '--access-key=' + fake_access_key, 120 | '--secret=' + fake_secret_key 121 | ] 122 | 123 | assert radosgw_realm.pull_realm(fake_module) == expected_cmd 124 | -------------------------------------------------------------------------------- /tests/unit/modules/test_ceph_crush.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from ansible_collections.ceph.automation.plugins.modules import ceph_crush 4 | 5 | 6 | class TestCephCrushModule(object): 7 | 8 | def test_no_host(self): 9 | location = [ 10 | ("chassis", "monchassis"), 11 | ("rack", "monrack"), 12 | ("row", "marow"), 13 | ("pdu", "monpdu"), 14 | ("pod", "monpod"), 15 | ("room", "maroom"), 16 | ("datacenter", "mondc"), 17 | ("region", "maregion"), 18 | ("root", "maroute"), 19 | ] 20 | with pytest.raises(Exception): 21 | ceph_crush.sort_osd_crush_location(location, None) 22 | 23 | def test_lower_than_two_bucket(self): 24 | location = [ 25 | ("chassis", "monchassis"), 26 | ] 27 | with pytest.raises(Exception): 28 | ceph_crush.sort_osd_crush_location(location, None) 29 | 30 | def test_invalid_bucket_type(self): 31 | location = [ 32 | ("host", "monhost"), 33 | ("chassis", "monchassis"), 34 | ("rackyyyyy", "monrack"), 35 | ] 36 | with pytest.raises(Exception): 37 | ceph_crush.sort_osd_crush_location(location, None) 38 | 39 | def test_ordering(self): 40 | expected_result = [ 41 | ("host", "monhost"), 42 | ("chassis", "monchassis"), 43 | ("rack", "monrack"), 44 | ("row", "marow"), 45 | ("pdu", "monpdu"), 46 | ("pod", "monpod"), 47 | ("room", "maroom"), 48 | ("datacenter", "mondc"), 49 | ("region", "maregion"), 50 | ("root", "maroute"), 51 | ] 52 | expected_result_reverse = expected_result[::-1] 53 | result = ceph_crush.sort_osd_crush_location( 54 | expected_result_reverse, None) 55 | assert expected_result == result 56 | 57 | def test_generate_commands(self): 58 | cluster = "test" 59 | expected_command_list = [ 60 | ['ceph', '--cluster', cluster, 'osd', 61 | 'crush', "add-bucket", "monhost", "host"], 62 | ['ceph', '--cluster', cluster, 'osd', 'crush', 63 | "add-bucket", "monchassis", "chassis"], 64 | ['ceph', '--cluster', cluster, 'osd', 'crush', 65 | "move", "monhost", "chassis=monchassis"], 66 | ['ceph', '--cluster', cluster, 'osd', 67 | 'crush', "add-bucket", "monrack", "rack"], 68 | ['ceph', '--cluster', cluster, 'osd', 'crush', 69 | "move", "monchassis", "rack=monrack"], 70 | ] 71 | 72 | location = [ 73 | ("host", "monhost"), 74 | ("chassis", "monchassis"), 75 | ("rack", "monrack"), 76 | ] 77 | 78 | crush_map = {"nodes": []} 79 | 80 | result = ceph_crush.create_and_move_buckets_list( 81 | cluster, location, crush_map) 82 | assert result == expected_command_list 83 | 84 | def test_generate_commands_container(self): 85 | cluster = "test" 86 | containerized = "docker exec -ti ceph-mon" 87 | expected_command_list = [ 88 | ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', 89 | cluster, 'osd', 'crush', "add-bucket", "monhost", "host"], 90 | ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', 91 | cluster, 'osd', 'crush', "add-bucket", "monchassis", "chassis"], 92 | ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 93 | 'osd', 'crush', "move", "monhost", "chassis=monchassis"], 94 | ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', 95 | cluster, 'osd', 'crush', "add-bucket", "monrack", "rack"], 96 | ['docker', 'exec', 
'-ti', 'ceph-mon', 'ceph', '--cluster', 97 | cluster, 'osd', 'crush', "move", "monchassis", "rack=monrack"], 98 | ] 99 | 100 | location = [ 101 | ("host", "monhost"), 102 | ("chassis", "monchassis"), 103 | ("rack", "monrack"), 104 | ] 105 | 106 | # crush_map = {"nodes": [{"id": -4, "name": "monrack", "type": "rack", "type_id": 3, "children": [-3]}, 107 | # {"id": -3, "name": "monchassis", "type": "chassis", 108 | # "type_id": 2, "pool_weights": {}, "children": [-2]}, 109 | # {"id": -2, "name": "monhost", "type": "host", 110 | # "type_id": 1, "pool_weights": {}, "children": []}, 111 | # {"id": -1, "name": "default", "type": "root", "type_id": 11, "children": []}], 112 | # "stray": []} 113 | 114 | crush_map = {"nodes": []} 115 | 116 | result = ceph_crush.create_and_move_buckets_list( 117 | cluster, location, crush_map, containerized) 118 | assert result == expected_command_list 119 | -------------------------------------------------------------------------------- /tests/unit/modules/test_radosgw_zonegroup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from mock.mock import patch, MagicMock 3 | import pytest 4 | # sys.path.append('./library') 5 | from ansible_collections.ceph.automation.plugins.modules import radosgw_zonegroup 6 | 7 | 8 | fake_binary = 'radosgw-admin' 9 | fake_cluster = 'ceph' 10 | fake_container_binary = 'podman' 11 | fake_container_image = 'docker.io/ceph/daemon:latest' 12 | fake_container_cmd = [ 13 | fake_container_binary, 14 | 'run', 15 | '--rm', 16 | '--net=host', 17 | '-v', '/etc/ceph:/etc/ceph:z', 18 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 19 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 20 | '--entrypoint=' + fake_binary, 21 | fake_container_image 22 | ] 23 | fake_realm = 'foo' 24 | fake_zonegroup = 'bar' 25 | fake_endpoints = ['http://192.168.1.10:8080', 'http://192.168.1.11:8080'] 26 | fake_params = {'cluster': fake_cluster, 27 | 'name': fake_zonegroup, 28 | 'realm': fake_realm, 29 | 'endpoints': fake_endpoints, 30 | 'default': True, 31 | 'master': True} 32 | 33 | 34 | class TestRadosgwZonegroupModule(object): 35 | 36 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 37 | def test_container_exec(self): 38 | cmd = radosgw_zonegroup.container_exec(fake_binary, fake_container_image) 39 | assert cmd == fake_container_cmd 40 | 41 | def test_not_is_containerized(self): 42 | assert radosgw_zonegroup.is_containerized() is None 43 | 44 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 45 | def test_is_containerized(self): 46 | assert radosgw_zonegroup.is_containerized() == fake_container_image 47 | 48 | @pytest.mark.parametrize('image', [None, fake_container_image]) 49 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 50 | def test_pre_generate_radosgw_cmd(self, image): 51 | if image: 52 | expected_cmd = fake_container_cmd 53 | else: 54 | expected_cmd = [fake_binary] 55 | 56 | assert radosgw_zonegroup.pre_generate_radosgw_cmd(image) == expected_cmd 57 | 58 | @pytest.mark.parametrize('image', [None, fake_container_image]) 59 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 60 | def test_generate_radosgw_cmd(self, image): 61 | if image: 62 | expected_cmd = fake_container_cmd 63 | else: 64 | expected_cmd = [fake_binary] 65 | 66 | expected_cmd.extend([ 67 | '--cluster', 68 | fake_cluster, 69 | 'zonegroup' 70 | ]) 71 | assert radosgw_zonegroup.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd 72 | 73 | def 
test_create_zonegroup(self): 74 | fake_module = MagicMock() 75 | fake_module.params = fake_params 76 | expected_cmd = [ 77 | fake_binary, 78 | '--cluster', fake_cluster, 79 | 'zonegroup', 'create', 80 | '--rgw-realm=' + fake_realm, 81 | '--rgw-zonegroup=' + fake_zonegroup, 82 | '--endpoints=' + ','.join(fake_endpoints), 83 | '--default', 84 | '--master' 85 | ] 86 | 87 | assert radosgw_zonegroup.create_zonegroup(fake_module) == expected_cmd 88 | 89 | def test_modify_zonegroup(self): 90 | fake_module = MagicMock() 91 | fake_module.params = fake_params 92 | expected_cmd = [ 93 | fake_binary, 94 | '--cluster', fake_cluster, 95 | 'zonegroup', 'modify', 96 | '--rgw-realm=' + fake_realm, 97 | '--rgw-zonegroup=' + fake_zonegroup, 98 | '--endpoints=' + ','.join(fake_endpoints), 99 | '--default', 100 | '--master' 101 | ] 102 | 103 | assert radosgw_zonegroup.modify_zonegroup(fake_module) == expected_cmd 104 | 105 | def test_get_zonegroup(self): 106 | fake_module = MagicMock() 107 | fake_module.params = fake_params 108 | expected_cmd = [ 109 | fake_binary, 110 | '--cluster', fake_cluster, 111 | 'zonegroup', 'get', 112 | '--rgw-realm=' + fake_realm, 113 | '--rgw-zonegroup=' + fake_zonegroup, 114 | '--format=json' 115 | ] 116 | 117 | assert radosgw_zonegroup.get_zonegroup(fake_module) == expected_cmd 118 | 119 | def test_get_realm(self): 120 | fake_module = MagicMock() 121 | fake_module.params = fake_params 122 | expected_cmd = [ 123 | fake_binary, 124 | '--cluster', fake_cluster, 125 | 'realm', 'get', 126 | '--rgw-realm=' + fake_realm, 127 | '--format=json' 128 | ] 129 | 130 | assert radosgw_zonegroup.get_realm(fake_module) == expected_cmd 131 | 132 | def test_remove_zonegroup(self): 133 | fake_module = MagicMock() 134 | fake_module.params = fake_params 135 | expected_cmd = [ 136 | fake_binary, 137 | '--cluster', fake_cluster, 138 | 'zonegroup', 'delete', 139 | '--rgw-realm=' + fake_realm, 140 | '--rgw-zonegroup=' + fake_zonegroup 141 | ] 142 | 143 | assert radosgw_zonegroup.remove_zonegroup(fake_module) == expected_cmd 144 | -------------------------------------------------------------------------------- /plugins/modules/ceph_volume_simple_scan.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright 2020, Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | 18 | from __future__ import absolute_import, division, print_function 19 | __metaclass__ = type 20 | 21 | ANSIBLE_METADATA = { 22 | 'metadata_version': '1.1', 23 | 'status': ['preview'], 24 | 'supported_by': 'community' 25 | } 26 | 27 | DOCUMENTATION = ''' 28 | --- 29 | module: ceph_volume_simple_scan 30 | short_description: Scan legacy OSD with ceph-volume 31 | version_added: "1.1.0" 32 | description: 33 | - Scan legacy OSD with ceph-volume and store the output as JSON file in /etc/ceph/osd directory with {OSD_ID}-{OSD_FSID}.json format. 
34 | options: 35 | cluster: 36 | description: 37 | - The ceph cluster name. 38 | type: str 39 | required: false 40 | default: ceph 41 | path: 42 | description: 43 | - The OSD directory or metadata partition. The directory or partition must exist. 44 | type: path 45 | required: false 46 | force: 47 | description: 48 | - Force re-scanning an OSD and overwriting the JSON content. 49 | type: bool 50 | required: false 51 | default: false 52 | stdout: 53 | description: 54 | - Do not store the output to JSON file but stdout instead. 55 | type: bool 56 | required: false 57 | default: false 58 | author: 59 | - Dimitri Savineau (@dsavineau) 60 | ''' 61 | 62 | EXAMPLES = ''' 63 | - name: scan all running OSDs 64 | ceph_volume_simple_scan: 65 | cluster: ceph 66 | 67 | - name: scan an OSD with the directory 68 | ceph_volume_simple_scan: 69 | cluster: ceph 70 | path: /var/lib/ceph/osd/ceph-3 71 | 72 | - name: scan an OSD with the partition 73 | ceph_volume_simple_scan: 74 | cluster: ceph 75 | path: /dev/sdb1 76 | 77 | - name: rescan an OSD and print the result on stdout 78 | ceph_volume_simple_scan: 79 | cluster: ceph 80 | path: /dev/nvme0n1p1 81 | force: true 82 | stdout: true 83 | ''' 84 | 85 | RETURN = '''# ''' 86 | 87 | from ansible.module_utils.basic import AnsibleModule 88 | try: 89 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module 90 | except ImportError: 91 | from module_utils.ceph_common import exit_module 92 | import datetime 93 | import os 94 | 95 | 96 | def main(): 97 | module = AnsibleModule( 98 | argument_spec=dict( 99 | cluster=dict(type='str', required=False, default='ceph'), 100 | path=dict(type='path', required=False), 101 | force=dict(type='bool', required=False, default=False), 102 | stdout=dict(type='bool', required=False, default=False), 103 | ), 104 | supports_check_mode=True, 105 | ) 106 | 107 | path = module.params.get('path') 108 | cluster = module.params.get('cluster') 109 | force = module.params.get('force') 110 | stdout = module.params.get('stdout') 111 | 112 | if path and not os.path.exists(path): 113 | module.fail_json(msg='{} does not exist'.format(path), rc=1) 114 | 115 | startd = datetime.datetime.now() 116 | 117 | container_image = os.getenv('CEPH_CONTAINER_IMAGE') 118 | container_binary = os.getenv('CEPH_CONTAINER_BINARY') 119 | if container_binary and container_image: 120 | cmd = [container_binary, 121 | 'run', '--rm', '--privileged', 122 | '--ipc=host', '--net=host', 123 | '-v', '/etc/ceph:/etc/ceph:z', 124 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 125 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 126 | '-v', '/run/lvm/:/run/lvm/', 127 | '-v', '/run/lock/lvm/:/run/lock/lvm/', 128 | '--entrypoint=ceph-volume', container_image] 129 | else: 130 | cmd = ['ceph-volume'] 131 | 132 | cmd.extend(['--cluster', cluster, 'simple', 'scan']) 133 | 134 | if force: 135 | cmd.append('--force') 136 | 137 | if stdout: 138 | cmd.append('--stdout') 139 | 140 | if path: 141 | cmd.append(path) 142 | 143 | if module.check_mode: 144 | exit_module( 145 | module=module, 146 | out='', 147 | rc=0, 148 | cmd=cmd, 149 | err='', 150 | startd=startd, 151 | changed=False 152 | ) 153 | else: 154 | rc, out, err = module.run_command(cmd) 155 | exit_module( 156 | module=module, 157 | out=out, 158 | rc=rc, 159 | cmd=cmd, 160 | err=err, 161 | startd=startd, 162 | changed=True 163 | ) 164 | 165 | 166 | if __name__ == '__main__': 167 | main() 168 | -------------------------------------------------------------------------------- 
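The JSON files written by ceph_volume_simple_scan are the same files consumed by the ceph_volume_simple_activate module that appears later in this collection. A minimal sketch of chaining the two tasks in a playbook; the osds host group, the OSD directory, the OSD id 3 and the fsid are placeholder values reused from the modules' own examples:

- name: migrate a legacy OSD
  hosts: osds
  become: true
  tasks:
    - name: scan the legacy OSD directory
      ceph_volume_simple_scan:
        cluster: ceph
        path: /var/lib/ceph/osd/ceph-3

    - name: activate the OSD from the JSON file written by the scan
      ceph_volume_simple_activate:
        cluster: ceph
        path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json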
/tests/unit/modules/test_radosgw_user.py: -------------------------------------------------------------------------------- 1 | import os 2 | from mock.mock import patch, MagicMock 3 | import pytest 4 | # sys.path.append('./library') 5 | from ansible_collections.ceph.automation.plugins.modules import radosgw_user 6 | 7 | 8 | fake_binary = 'radosgw-admin' 9 | fake_cluster = 'ceph' 10 | fake_container_binary = 'podman' 11 | fake_container_image = 'docker.io/ceph/daemon:latest' 12 | fake_container_cmd = [ 13 | fake_container_binary, 14 | 'run', 15 | '--rm', 16 | '--net=host', 17 | '-v', '/etc/ceph:/etc/ceph:z', 18 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 19 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 20 | '--entrypoint=' + fake_binary, 21 | fake_container_image 22 | ] 23 | fake_user = 'foo' 24 | fake_realm = 'canada' 25 | fake_zonegroup = 'quebec' 26 | fake_zone = 'montreal' 27 | fake_params = {'cluster': fake_cluster, 28 | 'name': fake_user, 29 | 'display_name': fake_user, 30 | 'email': fake_user, 31 | 'access_key': 'PC7NPg87QWhOzXTkXIhX', 32 | 'secret_key': 'jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz', 33 | 'realm': fake_realm, 34 | 'zonegroup': fake_zonegroup, 35 | 'zone': fake_zone, 36 | 'system': True, 37 | 'admin': True} 38 | 39 | 40 | class TestRadosgwUserModule(object): 41 | 42 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 43 | def test_container_exec(self): 44 | cmd = radosgw_user.container_exec(fake_binary, fake_container_image) 45 | assert cmd == fake_container_cmd 46 | 47 | def test_not_is_containerized(self): 48 | assert radosgw_user.is_containerized() is None 49 | 50 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 51 | def test_is_containerized(self): 52 | assert radosgw_user.is_containerized() == fake_container_image 53 | 54 | @pytest.mark.parametrize('image', [None, fake_container_image]) 55 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 56 | def test_pre_generate_radosgw_cmd(self, image): 57 | if image: 58 | expected_cmd = fake_container_cmd 59 | else: 60 | expected_cmd = [fake_binary] 61 | 62 | assert radosgw_user.pre_generate_radosgw_cmd(image) == expected_cmd 63 | 64 | @pytest.mark.parametrize('image', [None, fake_container_image]) 65 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 66 | def test_generate_radosgw_cmd(self, image): 67 | if image: 68 | expected_cmd = fake_container_cmd 69 | else: 70 | expected_cmd = [fake_binary] 71 | 72 | expected_cmd.extend([ 73 | '--cluster', 74 | fake_cluster, 75 | 'user' 76 | ]) 77 | assert radosgw_user.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd 78 | 79 | def test_create_user(self): 80 | fake_module = MagicMock() 81 | fake_module.params = fake_params 82 | expected_cmd = [ 83 | fake_binary, 84 | '--cluster', fake_cluster, 85 | 'user', 'create', 86 | '--uid=' + fake_user, 87 | '--display_name=' + fake_user, 88 | '--email=' + fake_user, 89 | '--access-key=PC7NPg87QWhOzXTkXIhX', 90 | '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz', 91 | '--rgw-realm=' + fake_realm, 92 | '--rgw-zonegroup=' + fake_zonegroup, 93 | '--rgw-zone=' + fake_zone, 94 | '--system', 95 | '--admin' 96 | ] 97 | 98 | assert radosgw_user.create_user(fake_module) == expected_cmd 99 | 100 | def test_modify_user(self): 101 | fake_module = MagicMock() 102 | fake_module.params = fake_params 103 | expected_cmd = [ 104 | fake_binary, 105 | '--cluster', fake_cluster, 106 | 'user', 'modify', 107 | '--uid=' + fake_user, 108 | '--display_name=' + 
fake_user, 109 | '--email=' + fake_user, 110 | '--access-key=PC7NPg87QWhOzXTkXIhX', 111 | '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz', 112 | '--rgw-realm=' + fake_realm, 113 | '--rgw-zonegroup=' + fake_zonegroup, 114 | '--rgw-zone=' + fake_zone, 115 | '--system', 116 | '--admin' 117 | ] 118 | 119 | assert radosgw_user.modify_user(fake_module) == expected_cmd 120 | 121 | def test_get_user(self): 122 | fake_module = MagicMock() 123 | fake_module.params = fake_params 124 | expected_cmd = [ 125 | fake_binary, 126 | '--cluster', fake_cluster, 127 | 'user', 'info', 128 | '--uid=' + fake_user, 129 | '--format=json', 130 | '--rgw-realm=' + fake_realm, 131 | '--rgw-zonegroup=' + fake_zonegroup, 132 | '--rgw-zone=' + fake_zone 133 | ] 134 | 135 | assert radosgw_user.get_user(fake_module) == expected_cmd 136 | 137 | def test_remove_user(self): 138 | fake_module = MagicMock() 139 | fake_module.params = fake_params 140 | expected_cmd = [ 141 | fake_binary, 142 | '--cluster', fake_cluster, 143 | 'user', 'rm', 144 | '--uid=' + fake_user, 145 | '--rgw-realm=' + fake_realm, 146 | '--rgw-zonegroup=' + fake_zonegroup, 147 | '--rgw-zone=' + fake_zone 148 | ] 149 | 150 | assert radosgw_user.remove_user(fake_module) == expected_cmd 151 | -------------------------------------------------------------------------------- /.github/workflows/unit_source.yml: -------------------------------------------------------------------------------- 1 | name: Unit tests, dependencies from source 2 | on: 3 | workflow_call: 4 | inputs: 5 | collection_pre_install: 6 | required: false 7 | type: string 8 | default: "" 9 | matrix_exclude: 10 | # https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-core-support-matrix 11 | # 2.15 supports Python 3.9-3.11 12 | # 2.16 supports Python 3.10-3.11 13 | # 2.17 supports Python 3.10-3.12 14 | # 2.18 supports Python 3.11-3.13 15 | # 2.19 supports Python 3.11-3.13 16 | # support for Python 3.13 added and 3.10 removed in 2.18 for control node 17 | # target node supported Python 3.8-3.13 as of 2.18 and 2.19 18 | # milestone is and devel is switched to 2.20 and drops support for Python 3.11 19 | # https://docs.ansible.com/ansible/devel/roadmap/ROADMAP_2_18.html 20 | default: >- 21 | [ 22 | { 23 | "ansible-version": "milestone", 24 | "python-version": "3.11" 25 | }, 26 | { 27 | "ansible-version": "devel", 28 | "python-version": "3.11" 29 | }, 30 | { 31 | "ansible-version": "stable-2.17", 32 | "python-version": "3.13" 33 | }, 34 | { 35 | "ansible-version": "stable-2.16", 36 | "python-version": "3.12" 37 | }, 38 | { 39 | "ansible-version": "stable-2.16", 40 | "python-version": "3.13" 41 | }, 42 | { 43 | "ansible-version": "stable-2.15", 44 | "python-version": "3.12" 45 | }, 46 | { 47 | "ansible-version": "stable-2.15", 48 | "python-version": "3.13" 49 | } 50 | ] 51 | required: false 52 | type: string 53 | 54 | jobs: 55 | unit_source: 56 | env: 57 | PY_COLORS: "1" 58 | source_directory: "./source" 59 | runs-on: ubuntu-latest 60 | strategy: 61 | fail-fast: false 62 | matrix: 63 | ansible-version: 64 | # ansible-core 2.15 reached EOL on November 2024 65 | - stable-2.16 66 | - stable-2.17 67 | - stable-2.18 68 | - stable-2.19 69 | - milestone 70 | - devel 71 | python-version: 72 | # 2.16 supports Python 3.10-3.11 73 | # 2.17 supports Python 3.10-3.12 74 | # 2.18 supports Python 3.11-3.13 75 | # - "3.10" 76 | - "3.11" 77 | - "3.12" 78 | - "3.13" 79 | exclude: ${{ fromJSON(inputs.matrix_exclude) }} 80 | continue-on-error: ${{ matrix.ansible-version == 
'devel' }} 81 | 82 | name: "py${{ matrix.python-version }} / ${{ matrix.ansible-version }}" 83 | steps: 84 | - name: Checkout the repository 85 | uses: actions/checkout@v4 86 | with: 87 | fetch-depth: 0 88 | - name: Checkout the collection repository 89 | uses: ./.github/actions/checkout_dependency 90 | with: 91 | path: ${{ env.source_directory }} 92 | ref: ${{ github.event.pull_request.head.sha }} 93 | fetch-depth: "0" 94 | 95 | - name: Set up Python ${{ matrix.python-version }} 96 | uses: actions/setup-python@v4 97 | with: 98 | python-version: ${{ matrix.python-version }} 99 | 100 | # ansible-pylibssh does not have cp312 wheels 101 | # when building from sdist libssh-dev needs to be installed 102 | # extra install step starts 103 | - name: Install build toolchain and openssl headers on Linux 104 | shell: bash 105 | run: sudo apt update && sudo apt install build-essential libssl-dev 106 | if: ${{ matrix.python-version >= 3.12 }} 107 | 108 | - name: Install catchsegv and libssh headers on Linux for cythonize+coverage 109 | shell: bash 110 | run: sudo apt update && sudo apt install libssh-dev 111 | if: ${{ matrix.python-version >= 3.12 }} 112 | # extra install step ends 113 | 114 | - name: Install ansible-core (${{ matrix.ansible-version }}) 115 | run: python3 -m pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check 116 | 117 | - name: Pre install collections dependencies first so the collection install does not 118 | run: ansible-galaxy collection install --pre ${{ inputs.collection_pre_install }} -p /home/runner/collections 119 | if: inputs.collection_pre_install != '' 120 | 121 | - name: Read collection metadata from galaxy.yml 122 | id: identify 123 | uses: ./.github/actions/identify_collection 124 | with: 125 | source_path: ${{ env.source_directory }} 126 | 127 | - name: Build and install the collection 128 | uses: ./.github/actions/build_install_collection 129 | with: 130 | install_python_dependencies: true 131 | source_path: ${{ env.source_directory }} 132 | collection_path: ${{ steps.identify.outputs.collection_path }} 133 | tar_file: ${{ steps.identify.outputs.tar_file }} 134 | ansible_version: ${{ matrix.ansible-version }} 135 | 136 | - name: Print the ansible version 137 | run: ansible --version 138 | 139 | - name: Print the python dependencies 140 | run: python3 -m pip list 141 | 142 | - name: Run unit tests 143 | run: python -m pytest tests/unit --showlocals --ansible-host-pattern localhost 144 | working-directory: ${{ steps.identify.outputs.collection_path }} 145 | -------------------------------------------------------------------------------- /plugins/module_utils/ceph_common.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | __metaclass__ = type 3 | 4 | import datetime 5 | import os 6 | import time 7 | from typing import TYPE_CHECKING, Any, List, Dict, Callable, Type, TypeVar, Optional 8 | 9 | if TYPE_CHECKING: 10 | from ansible.module_utils.basic import AnsibleModule # type: ignore 11 | 12 | ExceptionType = TypeVar('ExceptionType', bound=BaseException) 13 | 14 | 15 | def generate_cmd(cmd='ceph', 16 | sub_cmd=None, 17 | args=None, 18 | user_key=None, 19 | cluster='ceph', 20 | user='client.admin', 21 | container_image=None, 22 | interactive=False): 23 | ''' 24 | Generate 'ceph' command line to execute 25 | ''' 26 | 27 | if user_key is None: 28 | user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user) 
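    # With the defaults (cluster='ceph', user='client.admin') this resolves to
    # /etc/ceph/ceph.client.admin.keyring, so on a non-containerized host a call such as
    # generate_cmd(sub_cmd=['osd', 'pool'], args=['create', 'foo']) returns
    # ['ceph', '-n', 'client.admin', '-k', '/etc/ceph/ceph.client.admin.keyring',
    #  '--cluster', 'ceph', 'osd', 'pool', 'create', 'foo'],
    # which is the expectation asserted in tests/unit/module_utils/test_ca_common.py.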
29 | 30 | cmd = pre_generate_cmd(cmd, container_image=container_image, interactive=interactive) # noqa: E501 31 | 32 | base_cmd = [ 33 | '-n', 34 | user, 35 | '-k', 36 | user_key, 37 | '--cluster', 38 | cluster 39 | ] 40 | 41 | if sub_cmd is not None: 42 | base_cmd.extend(sub_cmd) 43 | 44 | cmd.extend(base_cmd) if args is None else cmd.extend(base_cmd + args) 45 | 46 | return cmd 47 | 48 | 49 | def container_exec(binary, container_image, interactive=False): 50 | ''' 51 | Build the docker CLI to run a command inside a container 52 | ''' 53 | 54 | container_binary = os.getenv('CEPH_CONTAINER_BINARY') 55 | command_exec = [container_binary, 'run'] 56 | 57 | if interactive: 58 | command_exec.extend(['--interactive']) 59 | 60 | command_exec.extend(['--rm', 61 | '--net=host', 62 | '-v', '/etc/ceph:/etc/ceph:z', 63 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 64 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 65 | '--entrypoint=' + binary, container_image]) 66 | return command_exec 67 | 68 | 69 | def is_containerized(): 70 | ''' 71 | Check if we are running on a containerized cluster 72 | ''' 73 | 74 | if 'CEPH_CONTAINER_IMAGE' in os.environ: 75 | container_image = os.getenv('CEPH_CONTAINER_IMAGE') 76 | else: 77 | container_image = None 78 | 79 | return container_image 80 | 81 | 82 | def pre_generate_cmd(cmd, container_image=None, interactive=False): 83 | ''' 84 | Generate ceph prefix command 85 | ''' 86 | if container_image: 87 | cmd = container_exec(cmd, container_image, interactive=interactive) 88 | else: 89 | cmd = [cmd] 90 | 91 | return cmd 92 | 93 | 94 | def exec_command(module, cmd, stdin=None, check_rc=False): 95 | ''' 96 | Execute command(s) 97 | ''' 98 | 99 | binary_data = False 100 | if stdin: 101 | binary_data = True 102 | rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data, check_rc=check_rc) # noqa: E501 103 | 104 | return rc, cmd, out, err 105 | 106 | 107 | def retry(exceptions: Type[ExceptionType], module: "AnsibleModule", retries: int = 20, delay: int = 1) -> Callable: 108 | def decorator(f: Callable) -> Callable: 109 | def _retry(*args: Any, **kwargs: Any) -> Callable: 110 | _tries = retries 111 | while _tries > 1: 112 | try: 113 | module.debug(_tries) 114 | return f(*args, **kwargs) 115 | except exceptions: 116 | time.sleep(delay) 117 | _tries -= 1 118 | module.debug('{} has failed after {} retries'.format(f, retries)) 119 | return f(*args, **kwargs) 120 | return _retry 121 | return decorator 122 | 123 | 124 | def build_base_cmd(module: "AnsibleModule") -> List[str]: 125 | cmd = ['cephadm'] 126 | docker = module.params.get('docker') 127 | image = module.params.get('image') 128 | 129 | if docker: 130 | cmd.append('--docker') 131 | if image: 132 | cmd.extend(['--image', image]) 133 | 134 | return cmd 135 | 136 | 137 | def build_base_cmd_shell(module: "AnsibleModule") -> List[str]: 138 | cmd = build_base_cmd(module) 139 | fsid = module.params.get('fsid') 140 | 141 | cmd.append('shell') 142 | 143 | if fsid: 144 | cmd.extend(['--fsid', fsid]) 145 | 146 | return cmd 147 | 148 | 149 | def build_base_cmd_orch(module: "AnsibleModule") -> List[str]: 150 | cmd = build_base_cmd_shell(module) 151 | cmd.extend(['ceph', 'orch']) 152 | 153 | return cmd 154 | 155 | 156 | def exit_module(module: "AnsibleModule", 157 | rc: int, cmd: List[str], 158 | startd: datetime.datetime, 159 | out: str = '', 160 | err: str = '', 161 | changed: bool = False, 162 | diff: Optional[Dict[str, str]] = None) -> None: 163 | endd = datetime.datetime.now() 164 | delta = endd - startd 165 | 166 | result = dict(
167 | cmd=cmd, 168 | start=str(startd), 169 | end=str(endd), 170 | delta=str(delta), 171 | rc=rc, 172 | stdout=out.rstrip("\r\n"), 173 | stderr=err.rstrip("\r\n"), 174 | changed=changed, 175 | diff=diff 176 | ) 177 | module.exit_json(**result) 178 | 179 | 180 | def fatal(message: str, module: "AnsibleModule") -> None: 181 | ''' 182 | Report a fatal error and exit 183 | ''' 184 | 185 | if module: 186 | module.fail_json(msg=message, rc=1) 187 | else: 188 | raise Exception(message) 189 | -------------------------------------------------------------------------------- /plugins/modules/ceph_config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright Red Hat 5 | # SPDX-License-Identifier: Apache-2.0 6 | # Author: Guillaume Abrioux 7 | 8 | from __future__ import absolute_import, division, print_function 9 | __metaclass__ = type 10 | 11 | ANSIBLE_METADATA = { 12 | 'metadata_version': '1.1', 13 | 'status': ['preview'], 14 | 'supported_by': 'community' 15 | } 16 | 17 | DOCUMENTATION = ''' 18 | --- 19 | module: ceph_config 20 | short_description: set ceph config 21 | version_added: "1.1.0" 22 | description: 23 | - Set Ceph config options. 24 | options: 25 | fsid: 26 | description: 27 | - the fsid of the Ceph cluster to interact with. 28 | type: str 29 | required: false 30 | image: 31 | description: 32 | - The Ceph container image to use. 33 | type: str 34 | required: false 35 | action: 36 | description: 37 | - whether to get or set the parameter specified in 'option' 38 | type: str 39 | choices: ['get', 'set'] 40 | default: 'set' 41 | required: false 42 | who: 43 | description: 44 | - which daemon the configuration should be set to 45 | type: str 46 | required: true 47 | option: 48 | description: 49 | - name of the parameter to be set 50 | type: str 51 | required: true 52 | value: 53 | description: 54 | - value of the parameter 55 | type: str 56 | required: false 57 | 58 | author: 59 | - guillaume abrioux (@guits) 60 | ''' 61 | 62 | EXAMPLES = ''' 63 | - name: set osd_memory_target for osd.0 64 | ceph_config: 65 | action: set 66 | who: osd.0 67 | option: osd_memory_target 68 | value: 5368709120 69 | 70 | - name: set osd_memory_target for host ceph-osd-02 71 | ceph_config: 72 | action: set 73 | who: osd/host:ceph-osd-02 74 | option: osd_memory_target 75 | value: 5368709120 76 | 77 | - name: get osd_pool_default_size value 78 | ceph_config: 79 | action: get 80 | who: global 81 | option: osd_pool_default_size 82 | value: 1 83 | ''' 84 | 85 | RETURN = '''# ''' 86 | 87 | from typing import Any, Dict, List, Tuple, Union 88 | from ansible.module_utils.basic import AnsibleModule # type: ignore 89 | try: 90 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module, build_base_cmd_shell, fatal # type: ignore 91 | except ImportError: 92 | from module_utils.ceph_common import exit_module, build_base_cmd_shell, fatal # type: ignore 93 | 94 | import datetime 95 | import json 96 | 97 | 98 | def set_option(module: "AnsibleModule", 99 | who: str, 100 | option: str, 101 | value: str) -> Tuple[int, List[str], str, str]: 102 | cmd = build_base_cmd_shell(module) 103 | cmd.extend(['ceph', 'config', 'set', who, option, value]) 104 | 105 | rc, out, err = module.run_command(cmd) 106 | 107 | return rc, cmd, out.strip(), err 108 | 109 | 110 | def get_config_dump(module: "AnsibleModule") -> Tuple[int, List[str], str, str]: 111 | cmd = build_base_cmd_shell(module) 112 | cmd.extend(['ceph', 
'config', 'dump', '--format', 'json']) 113 | rc, out, err = module.run_command(cmd) 114 | if rc: 115 | fatal(message=f"Can't get current configuration via `ceph config dump`.Error:\n{err}", module=module) 116 | out = out.strip() 117 | return rc, cmd, out, err 118 | 119 | 120 | def get_current_value(who: str, option: str, config_dump: List[Dict[str, Any]]) -> Union[str, None]: 121 | for config in config_dump: 122 | if config['section'] == who and config['name'] == option: 123 | return config['value'] 124 | return None 125 | 126 | 127 | def main() -> None: 128 | module = AnsibleModule( 129 | argument_spec=dict( 130 | who=dict(type='str', required=True), 131 | action=dict(type='str', required=False, choices=['get', 'set'], default='set'), 132 | option=dict(type='str', required=True), 133 | value=dict(type='str', required=False), 134 | fsid=dict(type='str', required=False), 135 | image=dict(type='str', required=False) 136 | ), 137 | supports_check_mode=True, 138 | required_if=[['action', 'set', ['value']]] 139 | ) 140 | 141 | # Gather module parameters in variables 142 | who = module.params.get('who') 143 | option = module.params.get('option') 144 | value = module.params.get('value') 145 | action = module.params.get('action') 146 | 147 | if module.check_mode: 148 | module.exit_json( 149 | changed=False, 150 | stdout='', 151 | cmd=[], 152 | stderr='', 153 | rc=0, 154 | start='', 155 | end='', 156 | delta='', 157 | ) 158 | 159 | startd = datetime.datetime.now() 160 | changed = False 161 | 162 | rc, cmd, out, err = get_config_dump(module) 163 | config_dump = json.loads(out) 164 | current_value = get_current_value(who, option, config_dump) 165 | 166 | if action == 'set': 167 | if value.lower() == current_value: 168 | out = 'who={} option={} value={} already set. Skipping.'.format(who, option, value) 169 | else: 170 | rc, cmd, out, err = set_option(module, who, option, value) 171 | changed = True 172 | else: 173 | if current_value is None: 174 | out = '' 175 | err = 'No value found for who={} option={}'.format(who, option) 176 | else: 177 | out = current_value 178 | 179 | exit_module(module=module, out=out, rc=rc, 180 | cmd=cmd, err=err, startd=startd, 181 | changed=changed) 182 | 183 | 184 | if __name__ == '__main__': 185 | main() 186 | -------------------------------------------------------------------------------- /plugins/modules/cephadm_adopt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright 2020, Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
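# Idempotency note: main() first runs 'cephadm ls --no-detail' and exits unchanged when the
# requested daemon already reports style 'cephadm:v1'; otherwise it runs
# 'cephadm adopt --cluster <cluster> --name <name> --style <style>' (legacy by default),
# appending --skip-pull and/or --skip-firewalld when pull/firewalld are disabled.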
17 | 18 | from __future__ import absolute_import, division, print_function 19 | __metaclass__ = type 20 | 21 | ANSIBLE_METADATA = { 22 | 'metadata_version': '1.1', 23 | 'status': ['preview'], 24 | 'supported_by': 'community' 25 | } 26 | 27 | DOCUMENTATION = ''' 28 | --- 29 | module: cephadm_adopt 30 | short_description: Adopt a Ceph cluster with cephadm 31 | version_added: "1.1.0" 32 | description: 33 | - Adopt a Ceph cluster with cephadm 34 | options: 35 | name: 36 | description: 37 | - The ceph daemon name. 38 | type: str 39 | required: true 40 | cluster: 41 | description: 42 | - The ceph cluster name. 43 | type: str 44 | required: false 45 | default: ceph 46 | style: 47 | description: 48 | - Ceph deployment style. 49 | type: str 50 | required: false 51 | default: legacy 52 | image: 53 | description: 54 | - Ceph container image. 55 | type: str 56 | required: false 57 | docker: 58 | description: 59 | - Use docker instead of podman. 60 | type: bool 61 | required: false 62 | default: false 63 | pull: 64 | description: 65 | - Pull the Ceph container image. 66 | type: bool 67 | required: false 68 | default: true 69 | firewalld: 70 | description: 71 | - Manage firewall rules with firewalld. 72 | type: bool 73 | required: false 74 | default: true 75 | author: 76 | - Dimitri Savineau (@dsavineau) 77 | ''' 78 | 79 | EXAMPLES = ''' 80 | - name: adopt a ceph monitor with cephadm (default values) 81 | cephadm_adopt: 82 | name: mon.foo 83 | style: legacy 84 | 85 | - name: adopt a ceph monitor with cephadm (with custom values) 86 | cephadm_adopt: 87 | name: mon.foo 88 | style: legacy 89 | image: quay.io/ceph/daemon-base:latest-main-devel 90 | pull: false 91 | firewalld: false 92 | 93 | - name: adopt a ceph monitor with cephadm with custom image via env var 94 | cephadm_adopt: 95 | name: mon.foo 96 | style: legacy 97 | environment: 98 | CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel 99 | ''' 100 | 101 | RETURN = '''# ''' 102 | 103 | from ansible.module_utils.basic import AnsibleModule # type: ignore 104 | try: 105 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module # type: ignore 106 | except ImportError: 107 | from module_utils.ceph_common import exit_module 108 | import datetime 109 | import json 110 | 111 | 112 | def main(): 113 | module = AnsibleModule( 114 | argument_spec=dict( 115 | name=dict(type='str', required=True), 116 | cluster=dict(type='str', required=False, default='ceph'), 117 | style=dict(type='str', required=False, default='legacy'), 118 | image=dict(type='str', required=False), 119 | docker=dict(type='bool', required=False, default=False), 120 | pull=dict(type='bool', required=False, default=True), 121 | firewalld=dict(type='bool', required=False, default=True), 122 | ), 123 | supports_check_mode=True, 124 | ) 125 | 126 | name = module.params.get('name') 127 | cluster = module.params.get('cluster') 128 | style = module.params.get('style') 129 | docker = module.params.get('docker') 130 | image = module.params.get('image') 131 | pull = module.params.get('pull') 132 | firewalld = module.params.get('firewalld') 133 | 134 | startd = datetime.datetime.now() 135 | rc = 0 136 | err = '' 137 | 138 | cmd = ['cephadm', 'ls', '--no-detail'] 139 | 140 | if module.check_mode: 141 | exit_module( 142 | module=module, 143 | out='', 144 | rc=0, 145 | cmd=cmd, 146 | err='', 147 | startd=startd, 148 | changed=False 149 | ) 150 | else: 151 | rc, out, err = module.run_command(cmd) 152 | 153 | if rc == 0: 154 | if name in [x["name"] for x in
json.loads(out) if x["style"] == "cephadm:v1"]: # noqa: E501 155 | exit_module( 156 | module=module, 157 | out='{} is already adopted'.format(name), 158 | rc=0, 159 | cmd=cmd, 160 | err='', 161 | startd=startd, 162 | changed=False 163 | ) 164 | else: 165 | module.fail_json(msg=err, rc=rc) 166 | 167 | cmd = ['cephadm'] 168 | 169 | if docker: 170 | cmd.append('--docker') 171 | 172 | if image: 173 | cmd.extend(['--image', image]) 174 | 175 | cmd.extend(['adopt', '--cluster', cluster, '--name', name, '--style', style]) # noqa: E501 176 | 177 | if not pull: 178 | cmd.append('--skip-pull') 179 | 180 | if not firewalld: 181 | cmd.append('--skip-firewalld') 182 | 183 | rc, out, err = module.run_command(cmd) 184 | exit_module( 185 | module=module, 186 | out=out, 187 | rc=rc, 188 | cmd=cmd, 189 | err=err, 190 | startd=startd, 191 | changed=True 192 | ) 193 | 194 | 195 | if __name__ == '__main__': 196 | main() 197 | -------------------------------------------------------------------------------- /tests/unit/module_utils/test_ca_common.py: -------------------------------------------------------------------------------- 1 | from mock.mock import patch, MagicMock 2 | import os 3 | from ansible_collections.ceph.automation.plugins.module_utils import ceph_common 4 | import pytest 5 | 6 | fake_container_binary = 'podman' 7 | fake_container_image = 'docker.io/ceph/daemon:latest' 8 | 9 | 10 | class TestCommon(object): 11 | 12 | def setup_method(self): 13 | self.fake_binary = 'ceph' 14 | self.fake_cluster = 'ceph' 15 | self.fake_container_cmd = [ 16 | fake_container_binary, 17 | 'run', 18 | '--rm', 19 | '--net=host', 20 | '-v', '/etc/ceph:/etc/ceph:z', 21 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 22 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 23 | '--entrypoint=' + self.fake_binary, 24 | fake_container_image 25 | ] 26 | 27 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 28 | def test_container_exec(self): 29 | cmd = ceph_common.container_exec(self.fake_binary, fake_container_image) 30 | assert cmd == self.fake_container_cmd 31 | 32 | def test_not_is_containerized(self): 33 | assert ceph_common.is_containerized() is None 34 | 35 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 36 | def test_is_containerized(self): 37 | assert ceph_common.is_containerized() == fake_container_image 38 | 39 | @pytest.mark.parametrize('image', [None, fake_container_image]) 40 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 41 | def test_pre_generate_cmd(self, image): 42 | if image: 43 | expected_cmd = self.fake_container_cmd 44 | else: 45 | expected_cmd = [self.fake_binary] 46 | 47 | assert ceph_common.pre_generate_cmd(self.fake_binary, image) == expected_cmd # noqa: E501 48 | 49 | @pytest.mark.parametrize('image', [None, fake_container_image]) 50 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 51 | def test_generate_cmd(self, image): 52 | sub_cmd = ['osd', 'pool'] 53 | args = ['create', 'foo'] 54 | if image: 55 | expected_cmd = self.fake_container_cmd 56 | else: 57 | expected_cmd = [self.fake_binary] 58 | 59 | expected_cmd.extend([ 60 | '-n', 'client.admin', 61 | '-k', '/etc/ceph/ceph.client.admin.keyring', 62 | '--cluster', 63 | self.fake_cluster, 64 | 'osd', 'pool', 65 | 'create', 'foo' 66 | ]) 67 | assert ceph_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster=self.fake_cluster, container_image=image) == expected_cmd # noqa: E501 68 | 69 | @pytest.mark.parametrize('image', [None, fake_container_image]) 70 | 
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 71 | def test_generate_cmd_different_cluster_name(self, image): 72 | sub_cmd = ['osd', 'pool'] 73 | args = ['create', 'foo'] 74 | if image: 75 | expected_cmd = self.fake_container_cmd 76 | else: 77 | expected_cmd = [self.fake_binary] 78 | 79 | expected_cmd.extend([ 80 | '-n', 'client.admin', 81 | '-k', '/etc/ceph/foo.client.admin.keyring', 82 | '--cluster', 83 | 'foo', 84 | 'osd', 'pool', 85 | 'create', 'foo' 86 | ]) 87 | result = ceph_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster='foo', container_image=image) # noqa: E501 88 | assert result == expected_cmd 89 | 90 | @pytest.mark.parametrize('image', [None, fake_container_image]) 91 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 92 | def test_generate_cmd_different_cluster_name_and_user(self, image): 93 | sub_cmd = ['osd', 'pool'] 94 | args = ['create', 'foo'] 95 | if image: 96 | expected_cmd = self.fake_container_cmd 97 | else: 98 | expected_cmd = [self.fake_binary] 99 | 100 | expected_cmd.extend([ 101 | '-n', 'client.foo', 102 | '-k', '/etc/ceph/foo.client.foo.keyring', 103 | '--cluster', 104 | 'foo', 105 | 'osd', 'pool', 106 | 'create', 'foo' 107 | ]) 108 | result = ceph_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster='foo', user='client.foo', container_image=image) # noqa: E501 109 | assert result == expected_cmd 110 | 111 | @pytest.mark.parametrize('image', [None, fake_container_image]) 112 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 113 | def test_generate_cmd_different_user(self, image): 114 | sub_cmd = ['osd', 'pool'] 115 | args = ['create', 'foo'] 116 | if image: 117 | expected_cmd = self.fake_container_cmd 118 | else: 119 | expected_cmd = [self.fake_binary] 120 | 121 | expected_cmd.extend([ 122 | '-n', 'client.foo', 123 | '-k', '/etc/ceph/ceph.client.foo.keyring', 124 | '--cluster', 125 | 'ceph', 126 | 'osd', 'pool', 127 | 'create', 'foo' 128 | ]) 129 | result = ceph_common.generate_cmd(sub_cmd=sub_cmd, args=args, user='client.foo', container_image=image) # noqa: E501 130 | assert result == expected_cmd 131 | 132 | @pytest.mark.parametrize('stdin', [None, 'foo']) 133 | def test_exec_command(self, stdin): 134 | fake_module = MagicMock() 135 | rc = 0 136 | stderr = '' 137 | stdout = 'ceph version 1.2.3' 138 | fake_module.run_command.return_value = 0, stdout, stderr 139 | expected_cmd = [self.fake_binary, '--version'] 140 | _rc, _cmd, _out, _err = ceph_common.exec_command(fake_module, expected_cmd, stdin=stdin) # noqa: E501 141 | assert _rc == rc 142 | assert _cmd == expected_cmd 143 | assert _err == stderr 144 | assert _out == stdout 145 | -------------------------------------------------------------------------------- /plugins/modules/ceph_key_list.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018, Red Hat, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
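# Note: the mode/owner/group/seuser/serole/selevel/setype/attributes/unsafe_writes options
# documented as 'N/A' below are not used by the module logic itself; they only appear because
# AnsibleModule is instantiated with add_file_common_args=True further down.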
14 | 15 | from __future__ import absolute_import, division, print_function 16 | __metaclass__ = type 17 | 18 | 19 | ANSIBLE_METADATA = { 20 | 'metadata_version': '1.1', 21 | 'status': ['preview'], 22 | 'supported_by': 'community' 23 | } 24 | 25 | DOCUMENTATION = ''' 26 | --- 27 | module: ceph_key_list 28 | 29 | author: Teoman ONAY (@asM0deuz) 30 | 31 | short_description: Manage Cephx key(s) 32 | 33 | version_added: "1.1.0" 34 | 35 | description: 36 | - Manage CephX creation, deletion and updates. It can also list and get information about keyring(s). 37 | options: 38 | cluster: 39 | description: 40 | - The ceph cluster name. 41 | required: false 42 | type: str 43 | default: ceph 44 | user: 45 | description: 46 | - entity used to perform operation. It corresponds to the -n option (--name) 47 | type: str 48 | required: false 49 | default: 'client.admin' 50 | user_key: 51 | description: 52 | - the path to the keyring corresponding to the user being used. It corresponds to the -k option (--keyring) 53 | type: str 54 | required: false 55 | mode: 56 | description: 57 | - N/A 58 | type: raw 59 | owner: 60 | description: 61 | - N/A 62 | type: str 63 | group: 64 | description: 65 | - N/A 66 | type: str 67 | seuser: 68 | description: 69 | - N/A 70 | type: str 71 | serole: 72 | description: 73 | - N/A 74 | type: str 75 | selevel: 76 | description: 77 | - N/A 78 | type: str 79 | setype: 80 | description: 81 | - N/A 82 | type: str 83 | attributes: 84 | description: 85 | - N/A 86 | type: str 87 | aliases: 88 | - attr 89 | unsafe_writes: 90 | description: 91 | - N/A 92 | type: bool 93 | default: false 94 | ''' 95 | 96 | EXAMPLES = ''' 97 | - name: list cephx keys 98 | ceph_key_list: 99 | ''' 100 | 101 | RETURN = '''# ''' 102 | 103 | 104 | try: 105 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import generate_cmd, \ 106 | is_containerized 107 | except ImportError: 108 | from module_utils.ceph_common import generate_cmd, \ 109 | is_containerized 110 | try: 111 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_key_common import exec_commands 112 | except ImportError: 113 | from module_utils.ceph_key_common import exec_commands 114 | 115 | import os 116 | import datetime 117 | from ansible.module_utils.basic import AnsibleModule 118 | 119 | 120 | def list_keys(cluster, user, user_key, container_image=None): 121 | ''' 122 | List all CephX keys 123 | ''' 124 | 125 | cmd_list = [] 126 | 127 | args = [ 128 | 'ls', 129 | '-f', 130 | 'json', 131 | ] 132 | 133 | cmd_list.append(generate_cmd(sub_cmd=['auth'], 134 | args=args, 135 | cluster=cluster, 136 | user=user, 137 | user_key=user_key, 138 | container_image=container_image)) 139 | 140 | return cmd_list 141 | 142 | 143 | def run_module(): 144 | module_args = dict( 145 | cluster=dict(type='str', required=False, default='ceph'), 146 | user=dict(type='str', required=False, default='client.admin'), 147 | user_key=dict(type='str', required=False, default=None, no_log=False), 148 | ) 149 | 150 | module = AnsibleModule( 151 | argument_spec=module_args, 152 | supports_check_mode=True, 153 | add_file_common_args=True, 154 | ) 155 | 156 | # Gather module parameters in variables 157 | cluster = module.params.get('cluster') 158 | user = module.params.get('user') 159 | user_key = module.params.get('user_key') 160 | 161 | changed = False 162 | cmd = '' 163 | rc = 0 164 | out = '' 165 | err = '' 166 | 167 | result = dict( 168 | changed=changed, 169 | stdout='', 170 | stderr='', 171 | rc=0, 172 | start='', 173 | end='', 174 | 
delta='', 175 | ) 176 | 177 | if module.check_mode: 178 | module.exit_json(**result) 179 | 180 | startd = datetime.datetime.now() 181 | 182 | # will return either the image name or None 183 | container_image = is_containerized() 184 | 185 | if not user_key: 186 | user_key_filename = '{}.{}.keyring'.format(cluster, user) 187 | user_key_dir = '/etc/ceph' 188 | user_key_path = os.path.join(user_key_dir, user_key_filename) 189 | else: 190 | user_key_path = user_key 191 | 192 | rc, cmd, out, err = exec_commands( 193 | module, list_keys(cluster, user, user_key_path, container_image)) 194 | 195 | endd = datetime.datetime.now() 196 | delta = endd - startd 197 | 198 | result = dict( 199 | cmd=cmd, 200 | start=str(startd), 201 | end=str(endd), 202 | delta=str(delta), 203 | rc=rc, 204 | stdout=out.rstrip("\r\n"), 205 | stderr=err.rstrip("\r\n"), 206 | changed=changed, 207 | ) 208 | 209 | if rc != 0: 210 | module.fail_json(msg='non-zero return code', **result) 211 | 212 | module.exit_json(**result) 213 | 214 | 215 | def main(): 216 | run_module() 217 | 218 | 219 | if __name__ == '__main__': 220 | main() 221 | -------------------------------------------------------------------------------- /.github/actions/checkout_dependency/test_resolve_dependency.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Module used to test resolve_dependency.py script.""" 3 | 4 | import os 5 | import string 6 | 7 | from pathlib import PosixPath 8 | from random import choice 9 | from unittest.mock import MagicMock 10 | from unittest.mock import patch 11 | 12 | import pytest 13 | 14 | from resolve_dependency import get_pr_merge_commit_sha 15 | from resolve_dependency import main 16 | from resolve_dependency import resolve_ref 17 | 18 | 19 | @pytest.mark.parametrize( 20 | "pr_body,match", 21 | [ 22 | ("Depends-On: https://github.com/my_org/my_collection/pull/12345", True), 23 | ( 24 | "Depends-On: https://github.com/my_org/my_collection/pull/12345\n" 25 | "Depends-On: https://github.com/my_org/my_collection/pull/67890", 26 | True, 27 | ), 28 | ( 29 | "Depends-On: https://github.com/another_org/my_collection/pull/4000\n" 30 | "Depends-On: https://github.com/my_org/my_collection/pull/12345", 31 | True, 32 | ), 33 | ( 34 | "Depends-On: https://github.com/my_org/my_collection/pull/12345\n" 35 | "Depends-On: https://github.com/my_org/my_collection/pull/67890", 36 | True, 37 | ), 38 | ("Depends-On: https://github.com/another_org/my_collection/pull/12345", False), 39 | ("Depends-On: https://github.com/my_org/my_collection2/pull/12345", False), 40 | ("Depends-On: https://github.com/my_org/my_collection/pull", False), 41 | ], 42 | ) 43 | def test_resolve_ref(pr_body: str, match: bool) -> None: 44 | """Test resolve_ref function. 45 | 46 | :param pr_body: pull request body 47 | :param match: whether a depends-on should be found or not 48 | """ 49 | expected = 12345 if match else 0 50 | assert resolve_ref(pr_body, "my_org/my_collection") == expected 51 | 52 | 53 | class FakePullRequest: 54 | # pylint: disable=too-few-public-methods 55 | """Class to simulate PullRequest Object.""" 56 | 57 | def __init__(self, mergeable: bool) -> None: 58 | """Class constructor. 59 | 60 | :param mergeable: whether the pull request is mergeable or not 61 | """ 62 | self.mergeable = mergeable 63 | self.merge_commit_sha = self.generate_commit_sha() 64 | 65 | @staticmethod 66 | def generate_commit_sha(length: int = 16) -> str: 67 | """Generate random commit sha. 
68 | 69 | :param length: The length of the generated string 70 | :returns: The generated commit sha 71 | """ 72 | data = string.ascii_letters + string.digits 73 | return "".join([choice(data) for _ in range(length)]) 74 | 75 | 76 | @pytest.mark.parametrize("mergeable", [True, False]) 77 | @patch("resolve_dependency.Github") 78 | def test_get_pr_merge_commit_sha(m_github: MagicMock, mergeable: bool) -> None: 79 | """Test get_pr_merge_commit_sha function. 80 | 81 | :param m_github: The github module 82 | :param mergeable: whether the pull request is mergeable or not 83 | """ 84 | github_obj = MagicMock() 85 | m_github.return_value = github_obj 86 | 87 | os.environ["GITHUB_TOKEN"] = "unittest_github_token" 88 | 89 | m_github_repo = MagicMock() 90 | github_obj.get_repo = MagicMock() 91 | github_obj.get_repo.return_value = m_github_repo 92 | 93 | local_pr = FakePullRequest(mergeable=mergeable) 94 | m_github_repo.get_pull = MagicMock() 95 | m_github_repo.get_pull.return_value = local_pr 96 | 97 | repository = "my_testing_repository" 98 | pr_number = 12345 99 | 100 | if mergeable: 101 | assert get_pr_merge_commit_sha(repository, pr_number) == local_pr.merge_commit_sha 102 | else: 103 | with pytest.raises(ValueError): 104 | get_pr_merge_commit_sha(repository, pr_number) 105 | 106 | m_github.assert_called_once_with("unittest_github_token") 107 | github_obj.get_repo.assert_called_once_with(repository) 108 | m_github_repo.get_pull.assert_called_once_with(pr_number) 109 | 110 | 111 | @pytest.mark.parametrize("repository", [True, False]) 112 | @pytest.mark.parametrize("resolve_ref_pr", [0, 1]) 113 | @patch("resolve_dependency.get_pr_merge_commit_sha") 114 | @patch("resolve_dependency.resolve_ref") 115 | def test_main( 116 | m_resolve_ref: MagicMock, 117 | m_get_pr_merge_commit_sha: MagicMock, 118 | repository: bool, 119 | resolve_ref_pr: int, 120 | tmp_path: PosixPath, 121 | ) -> None: 122 | """Test main function. 
123 | 124 | :param m_resolve_ref: The resolve_ref mock function 125 | :param m_get_pr_merge_commit_sha: The get_pr_merge_commit_sha mock function 126 | :param repository: whether the repository is defined on environment variable or not 127 | :param resolve_ref_pr: The pull request number 128 | :param tmp_path: The temporary path for file to create for test 129 | """ 130 | pr_body = "My pull request body - this is a sample for unit tests" 131 | repository_name = "my_test_repository" 132 | os.environ["RESOLVE_REF_PR_BODY"] = pr_body 133 | 134 | gh_output_file = tmp_path / "github_output.txt" 135 | env_update = {"GITHUB_OUTPUT": str(gh_output_file)} 136 | if repository: 137 | env_update.update({"RESOLVE_REF_REPOSITORY": repository_name}) 138 | 139 | m_resolve_ref.return_value = resolve_ref_pr 140 | merge_commit_sha = FakePullRequest.generate_commit_sha() 141 | m_get_pr_merge_commit_sha.return_value = merge_commit_sha 142 | 143 | with patch.dict(os.environ, env_update): 144 | main() 145 | 146 | if not repository: 147 | m_resolve_ref.assert_not_called() 148 | m_get_pr_merge_commit_sha.assert_not_called() 149 | assert not gh_output_file.exists() 150 | elif not resolve_ref_pr: 151 | m_resolve_ref.assert_called_once_with(pr_body, repository_name) 152 | m_get_pr_merge_commit_sha.assert_not_called() 153 | assert not gh_output_file.exists() 154 | else: 155 | m_resolve_ref.assert_called_once_with(pr_body, repository_name) 156 | m_get_pr_merge_commit_sha.assert_called_once_with(repository_name, resolve_ref_pr) 157 | assert gh_output_file.exists() 158 | # gh_output_file.read_text() == f"merge_commit_sha={merge_commit_sha}\n" 159 | -------------------------------------------------------------------------------- /plugins/modules/ceph_volume_simple_activate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright 2020, Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | 18 | from __future__ import absolute_import, division, print_function 19 | __metaclass__ = type 20 | 21 | ANSIBLE_METADATA = { 22 | 'metadata_version': '1.1', 23 | 'status': ['preview'], 24 | 'supported_by': 'community' 25 | } 26 | 27 | DOCUMENTATION = ''' 28 | --- 29 | module: ceph_volume_simple_activate 30 | short_description: Activate legacy OSD with ceph-volume 31 | version_added: "1.1.0" 32 | description: 33 | - Activate legacy OSD with ceph-volume by providing the JSON file from 34 | the scan operation or by passing the OSD ID and OSD FSID. 35 | options: 36 | cluster: 37 | description: 38 | - The ceph cluster name. 39 | type: str 40 | required: false 41 | default: ceph 42 | path: 43 | description: 44 | - The OSD metadata as JSON file in /etc/ceph/osd directory, it must exist. 45 | type: path 46 | required: false 47 | osd_id: 48 | description: 49 | - The legacy OSD ID. 50 | type: str 51 | required: false 52 | osd_fsid: 53 | description: 54 | - The legacy OSD FSID. 
55 | type: str 56 | required: false 57 | osd_all: 58 | description: 59 | - Activate all legacy OSDs. 60 | type: bool 61 | required: false 62 | systemd: 63 | description: 64 | - Using systemd unit during the OSD activation. 65 | type: bool 66 | required: false 67 | default: true 68 | author: 69 | - Dimitri Savineau (@dsavineau) 70 | ''' 71 | 72 | EXAMPLES = ''' 73 | - name: activate all legacy OSDs 74 | ceph_volume_simple_activate: 75 | cluster: ceph 76 | osd_all: true 77 | 78 | - name: activate a legacy OSD via OSD ID and OSD FSID 79 | ceph_volume_simple_activate: 80 | cluster: ceph 81 | osd_id: 3 82 | osd_fsid: 0c4a7eca-0c2a-4c12-beff-08a80f064c52 83 | 84 | - name: activate a legacy OSD via the JSON file 85 | ceph_volume_simple_activate: 86 | cluster: ceph 87 | path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json 88 | 89 | - name: activate a legacy OSD via the JSON file without systemd 90 | ceph_volume_simple_activate: 91 | cluster: ceph 92 | path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json 93 | systemd: false 94 | ''' 95 | 96 | RETURN = '''# ''' 97 | 98 | from ansible.module_utils.basic import AnsibleModule 99 | try: 100 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import exit_module 101 | except ImportError: 102 | from module_utils.ceph_common import exit_module 103 | import datetime 104 | import os 105 | 106 | 107 | def main(): 108 | module = AnsibleModule( 109 | argument_spec=dict( 110 | cluster=dict(type='str', required=False, default='ceph'), 111 | path=dict(type='path', required=False), 112 | systemd=dict(type='bool', required=False, default=True), 113 | osd_id=dict(type='str', required=False), 114 | osd_fsid=dict(type='str', required=False), 115 | osd_all=dict(type='bool', required=False), 116 | ), 117 | supports_check_mode=True, 118 | mutually_exclusive=[ 119 | ('osd_all', 'osd_id'), 120 | ('osd_all', 'osd_fsid'), 121 | ('path', 'osd_id'), 122 | ('path', 'osd_fsid'), 123 | ], 124 | required_together=[ 125 | ('osd_id', 'osd_fsid') 126 | ], 127 | required_one_of=[ 128 | ('path', 'osd_id', 'osd_all'), 129 | ('path', 'osd_fsid', 'osd_all'), 130 | ], 131 | ) 132 | 133 | path = module.params.get('path') 134 | cluster = module.params.get('cluster') 135 | systemd = module.params.get('systemd') 136 | osd_id = module.params.get('osd_id') 137 | osd_fsid = module.params.get('osd_fsid') 138 | osd_all = module.params.get('osd_all') 139 | 140 | if path and not os.path.exists(path): 141 | module.fail_json(msg='{} does not exist'.format(path), rc=1) 142 | 143 | startd = datetime.datetime.now() 144 | 145 | container_image = os.getenv('CEPH_CONTAINER_IMAGE') 146 | container_binary = os.getenv('CEPH_CONTAINER_BINARY') 147 | if container_binary and container_image: 148 | cmd = [container_binary, 149 | 'run', '--rm', '--privileged', 150 | '--ipc=host', '--net=host', 151 | '-v', '/etc/ceph:/etc/ceph:z', 152 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 153 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 154 | '-v', '/run/lvm/:/run/lvm/', 155 | '-v', '/run/lock/lvm/:/run/lock/lvm/', 156 | '--entrypoint=ceph-volume', container_image] 157 | else: 158 | cmd = ['ceph-volume'] 159 | 160 | cmd.extend(['--cluster', cluster, 'simple', 'activate']) 161 | 162 | if osd_all: 163 | cmd.append('--all') 164 | else: 165 | if path: 166 | cmd.extend(['--file', path]) 167 | else: 168 | cmd.extend([osd_id, osd_fsid]) 169 | 170 | if not systemd: 171 | cmd.append('--no-systemd') 172 | 173 | if module.check_mode: 174 | exit_module( 175 | module=module, 176 | out='', 177 | rc=0, 
178 | cmd=cmd, 179 | err='', 180 | startd=startd, 181 | changed=False 182 | ) 183 | else: 184 | rc, out, err = module.run_command(cmd) 185 | exit_module( 186 | module=module, 187 | out=out, 188 | rc=rc, 189 | cmd=cmd, 190 | err=err, 191 | startd=startd, 192 | changed=True 193 | ) 194 | 195 | 196 | if __name__ == '__main__': 197 | main() 198 | -------------------------------------------------------------------------------- /plugins/modules/ceph_orch_daemon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright Red Hat 5 | # SPDX-License-Identifier: Apache-2.0 6 | # Author: Guillaume Abrioux 7 | 8 | from __future__ import absolute_import, division, print_function 9 | __metaclass__ = type 10 | 11 | ANSIBLE_METADATA = { 12 | 'metadata_version': '1.1', 13 | 'status': ['preview'], 14 | 'supported_by': 'community' 15 | } 16 | 17 | DOCUMENTATION = ''' 18 | --- 19 | module: ceph_orch_daemon 20 | short_description: stop/start daemon 21 | version_added: "1.0.0" 22 | description: 23 | - Start, stop or restart ceph daemon 24 | options: 25 | fsid: 26 | description: 27 | - the fsid of the Ceph cluster to interact with. 28 | type: str 29 | required: false 30 | image: 31 | description: 32 | - The Ceph container image to use. 33 | type: str 34 | required: false 35 | docker: 36 | description: 37 | - Use docker instead of podman 38 | type: bool 39 | default: false 40 | required: false 41 | state: 42 | description: 43 | - The desired state of the service specified in 'name'. 44 | - If 'started', it ensures the service is started. 45 | - If 'stopped', it ensures the service is stopped. 46 | - If 'restarted', it will restart the service. 47 | choices: 48 | - started 49 | - stopped 50 | - restarted 51 | type: str 52 | required: True 53 | daemon_id: 54 | description: 55 | - The id of the service. 56 | type: str 57 | required: true 58 | daemon_type: 59 | description: 60 | - The type of the service. 
61 | type: str 62 | required: true 63 | 64 | author: 65 | - Guillaume Abrioux (@guits) 66 | ''' 67 | 68 | EXAMPLES = ''' 69 | - name: start osd.0 70 | ceph_orch_daemon: 71 | state: started 72 | daemon_id: 0 73 | daemon_type: osd 74 | 75 | - name: stop mon.ceph-node0 76 | ceph_orch_daemon: 77 | state: stopped 78 | daemon_id: ceph-node0 79 | daemon_type: mon 80 | ''' 81 | 82 | RETURN = '''# ''' 83 | 84 | from ansible.module_utils.basic import AnsibleModule # type: ignore 85 | try: 86 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import retry, exit_module, build_base_cmd_orch, fatal # type: ignore 87 | except ImportError: 88 | from module_utils.ceph_common import retry, exit_module, build_base_cmd_orch, fatal # type: ignore 89 | 90 | from typing import List, Tuple 91 | import datetime 92 | import json 93 | 94 | 95 | def get_current_state(module: "AnsibleModule", 96 | daemon_type: str, 97 | daemon_id: str) -> Tuple[int, List[str], str, str]: 98 | cmd = build_base_cmd_orch(module) 99 | cmd.extend(['ps', '--daemon_type', 100 | daemon_type, '--daemon_id', 101 | daemon_id, '--format', 'json', 102 | '--refresh']) 103 | rc, out, err = module.run_command(cmd) 104 | 105 | return rc, cmd, out, err 106 | 107 | 108 | def update_daemon_status(module: "AnsibleModule", 109 | action: str, 110 | daemon_name: str) -> Tuple[int, List[str], str, str]: 111 | cmd = build_base_cmd_orch(module) 112 | cmd.extend(['daemon', action, daemon_name]) 113 | rc, out, err = module.run_command(cmd) 114 | 115 | return rc, cmd, out, err 116 | 117 | 118 | @retry(RuntimeError, AnsibleModule) 119 | def validate_updated_status(module: "AnsibleModule", 120 | action: str, 121 | daemon_type: str, 122 | daemon_id: str) -> None: 123 | rc, cmd, out, err = get_current_state(module, daemon_type, daemon_id) 124 | expected_state = 1 if action == 'start' else 0 125 | if not json.loads(out)[0]['status'] == expected_state: 126 | raise RuntimeError("Status for {}.{} isn't reported as expected.".format(daemon_type, daemon_id)) 127 | 128 | 129 | def main() -> None: 130 | module = AnsibleModule( 131 | argument_spec=dict( 132 | state=dict(type='str', 133 | required=True, 134 | choices=['started', 'stopped', 'restarted']), 135 | daemon_id=dict(type='str', required=True), 136 | daemon_type=dict(type='str', required=True), 137 | docker=dict(type='bool', 138 | required=False, 139 | default=False), 140 | fsid=dict(type='str', required=False), 141 | image=dict(type='str', required=False) 142 | ), 143 | supports_check_mode=True, 144 | ) 145 | 146 | # Gather module parameters in variables 147 | state = module.params.get('state') 148 | daemon_id = module.params.get('daemon_id') 149 | daemon_type = module.params.get('daemon_type') 150 | daemon_name = "{}.{}".format(daemon_type, daemon_id) 151 | 152 | if module.check_mode: 153 | module.exit_json( 154 | changed=False, 155 | stdout='', 156 | cmd=[], 157 | stderr='', 158 | rc=0, 159 | start='', 160 | end='', 161 | delta='', 162 | ) 163 | 164 | startd = datetime.datetime.now() 165 | changed = False 166 | 167 | rc, cmd, out, err = get_current_state(module, daemon_type, daemon_id) 168 | 169 | if rc or not json.loads(out): 170 | if not err: 171 | err = 'osd id {} not found'.format(daemon_id) 172 | fatal("Can't get current status of {}: {}".format(daemon_name, err), module) 173 | 174 | is_running = json.loads(out)[0]['status'] == 1 175 | 176 | current_state = 'started' if is_running else 'stopped' 177 | action = 'start' if state == 'started' else 'stop' 178 | if state == current_state: 179 
| out = "{} is already {}, skipping.".format(daemon_name, state) 180 | else: 181 | rc, cmd, out, err = update_daemon_status(module, action, daemon_name) 182 | validate_updated_status(module, action, daemon_type, daemon_id) 183 | changed = True 184 | 185 | if state == 'restarted': 186 | action = 'restart' 187 | changed = True 188 | rc, cmd, out, err = update_daemon_status(module, action, daemon_name) 189 | 190 | if rc: 191 | fatal("Can't {} {}: {}".format(action, daemon_name, err)) 192 | 193 | exit_module(module=module, out=out, rc=rc, 194 | cmd=cmd, err=err, startd=startd, 195 | changed=changed) 196 | 197 | 198 | if __name__ == '__main__': 199 | main() 200 | -------------------------------------------------------------------------------- /tests/unit/modules/test_ceph_dashboard_user.py: -------------------------------------------------------------------------------- 1 | from mock.mock import MagicMock, patch 2 | import pytest 3 | import os 4 | from ansible_collections.ceph.automation.tests.unit.modules import ca_test_common 5 | from ansible_collections.ceph.automation.plugins.modules import ceph_dashboard_user 6 | 7 | fake_container_binary = 'podman' 8 | fake_container_image = 'docker.io/ceph/daemon:latest' 9 | 10 | 11 | class TestCephDashboardUserModule(object): 12 | def setup_method(self): 13 | self.fake_binary = 'ceph' 14 | self.fake_cluster = 'ceph' 15 | self.fake_name = 'foo' 16 | self.fake_user = 'foo' 17 | self.fake_password = 'bar' 18 | self.fake_roles = ['read-only', 'block-manager'] 19 | self.fake_params = {'cluster': self.fake_cluster, 20 | 'name': self.fake_user, 21 | 'password': self.fake_password, 22 | 'roles': self.fake_roles} 23 | self.fake_module = MagicMock() 24 | self.fake_module.params = self.fake_params 25 | 26 | def test_create_user(self): 27 | self.fake_module.params = self.fake_params 28 | expected_cmd = [ 29 | self.fake_binary, 30 | '-n', 'client.admin', 31 | '-k', '/etc/ceph/ceph.client.admin.keyring', 32 | '--cluster', self.fake_cluster, 33 | 'dashboard', 'ac-user-create', 34 | '-i', '-', 35 | self.fake_user 36 | ] 37 | 38 | assert ceph_dashboard_user.create_user(self.fake_module) == expected_cmd 39 | 40 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 41 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 42 | def test_create_user_container(self): 43 | fake_container_cmd = [ 44 | fake_container_binary, 45 | 'run', 46 | '--interactive', 47 | '--rm', 48 | '--net=host', 49 | '-v', '/etc/ceph:/etc/ceph:z', 50 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 51 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 52 | '--entrypoint=' + self.fake_binary, 53 | fake_container_image 54 | ] 55 | self.fake_module.params = self.fake_params 56 | expected_cmd = fake_container_cmd + [ 57 | '-n', 'client.admin', 58 | '-k', '/etc/ceph/ceph.client.admin.keyring', 59 | '--cluster', self.fake_cluster, 60 | 'dashboard', 'ac-user-create', 61 | '-i', '-', 62 | self.fake_user 63 | ] 64 | 65 | assert ceph_dashboard_user.create_user(self.fake_module, container_image=fake_container_image) == expected_cmd 66 | 67 | def test_set_roles(self): 68 | self.fake_module.params = self.fake_params 69 | expected_cmd = [ 70 | self.fake_binary, 71 | '-n', 'client.admin', 72 | '-k', '/etc/ceph/ceph.client.admin.keyring', 73 | '--cluster', self.fake_cluster, 74 | 'dashboard', 'ac-user-set-roles', 75 | self.fake_user 76 | ] 77 | expected_cmd.extend(self.fake_roles) 78 | 79 | assert ceph_dashboard_user.set_roles(self.fake_module) == expected_cmd 80 | 81 | def 
test_set_password(self): 82 | self.fake_module.params = self.fake_params 83 | expected_cmd = [ 84 | self.fake_binary, 85 | '-n', 'client.admin', 86 | '-k', '/etc/ceph/ceph.client.admin.keyring', 87 | '--cluster', self.fake_cluster, 88 | 'dashboard', 'ac-user-set-password', 89 | '-i', '-', 90 | self.fake_user 91 | ] 92 | 93 | assert ceph_dashboard_user.set_password(self.fake_module) == expected_cmd 94 | 95 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 96 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 97 | def test_set_password_container(self): 98 | fake_container_cmd = [ 99 | fake_container_binary, 100 | 'run', 101 | '--interactive', 102 | '--rm', 103 | '--net=host', 104 | '-v', '/etc/ceph:/etc/ceph:z', 105 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 106 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 107 | '--entrypoint=' + self.fake_binary, 108 | fake_container_image 109 | ] 110 | self.fake_module.params = self.fake_params 111 | expected_cmd = fake_container_cmd + [ 112 | '-n', 'client.admin', 113 | '-k', '/etc/ceph/ceph.client.admin.keyring', 114 | '--cluster', self.fake_cluster, 115 | 'dashboard', 'ac-user-set-password', 116 | '-i', '-', 117 | self.fake_user 118 | ] 119 | 120 | assert ceph_dashboard_user.set_password(self.fake_module, container_image=fake_container_image) == expected_cmd 121 | 122 | def test_get_user(self): 123 | self.fake_module.params = self.fake_params 124 | expected_cmd = [ 125 | self.fake_binary, 126 | '-n', 'client.admin', 127 | '-k', '/etc/ceph/ceph.client.admin.keyring', 128 | '--cluster', self.fake_cluster, 129 | 'dashboard', 'ac-user-show', 130 | self.fake_user, 131 | '--format=json' 132 | ] 133 | 134 | assert ceph_dashboard_user.get_user(self.fake_module) == expected_cmd 135 | 136 | def test_remove_user(self): 137 | self.fake_module.params = self.fake_params 138 | expected_cmd = [ 139 | self.fake_binary, 140 | '-n', 'client.admin', 141 | '-k', '/etc/ceph/ceph.client.admin.keyring', 142 | '--cluster', self.fake_cluster, 143 | 'dashboard', 'ac-user-delete', 144 | self.fake_user 145 | ] 146 | 147 | assert ceph_dashboard_user.remove_user(self.fake_module) == expected_cmd 148 | 149 | @patch('ansible.module_utils.basic.AnsibleModule.fail_json') 150 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 151 | def test_create_user_fail_with_weak_password(self, m_run_command, m_fail_json): 152 | ca_test_common.set_module_args(self.fake_module.params) 153 | m_fail_json.side_effect = ca_test_common.fail_json 154 | get_rc = 2 155 | get_stderr = 'Error ENOENT: User {} does not exist.'.format(self.fake_user) 156 | get_stdout = '' 157 | create_rc = 22 158 | create_stderr = 'Error EINVAL: Password is too weak.' 
159 | create_stdout = '' 160 | m_run_command.side_effect = [ 161 | (get_rc, get_stdout, get_stderr), 162 | (create_rc, create_stdout, create_stderr) 163 | ] 164 | 165 | with pytest.raises(ca_test_common.AnsibleFailJson) as result: 166 | ceph_dashboard_user.main() 167 | 168 | result = result.value.args[0] 169 | assert result['msg'] == create_stderr 170 | assert result['rc'] == 1 171 | -------------------------------------------------------------------------------- /tests/unit/modules/test_ceph_osd_flag.py: -------------------------------------------------------------------------------- 1 | from mock.mock import patch 2 | import os 3 | import pytest 4 | from ansible_collections.ceph.automation.tests.unit.modules import ca_test_common 5 | from ansible_collections.ceph.automation.plugins.modules import ceph_osd_flag 6 | 7 | fake_cluster = 'ceph' 8 | fake_container_binary = 'podman' 9 | fake_container_image = 'quay.io/ceph/daemon:latest' 10 | fake_flag = 'noup' 11 | fake_user = 'client.admin' 12 | fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) 13 | invalid_flag = 'nofoo' 14 | 15 | 16 | class TestCephOSDFlagModule(object): 17 | 18 | @patch('ansible.module_utils.basic.AnsibleModule.fail_json') 19 | def test_without_parameters(self, m_fail_json): 20 | ca_test_common.set_module_args({}) 21 | m_fail_json.side_effect = ca_test_common.fail_json 22 | 23 | with pytest.raises(ca_test_common.AnsibleFailJson) as result: 24 | ceph_osd_flag.main() 25 | 26 | result = result.value.args[0] 27 | assert result['msg'] == 'missing required arguments: name' 28 | 29 | @patch('ansible.module_utils.basic.AnsibleModule.fail_json') 30 | def test_with_invalid_flag(self, m_fail_json): 31 | ca_test_common.set_module_args({ 32 | 'name': invalid_flag, 33 | }) 34 | m_fail_json.side_effect = ca_test_common.fail_json 35 | 36 | with pytest.raises(ca_test_common.AnsibleFailJson) as result: 37 | ceph_osd_flag.main() 38 | 39 | result = result.value.args[0] 40 | assert result['msg'] == ('value of name must be one of: noup, nodown, ' 41 | 'noout, nobackfill, norebalance, norecover, ' 42 | 'noscrub, nodeep-scrub, got: {}'.format(invalid_flag)) 43 | 44 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 45 | def test_with_check_mode(self, m_exit_json): 46 | ca_test_common.set_module_args({ 47 | 'name': fake_flag, 48 | '_ansible_check_mode': True 49 | }) 50 | m_exit_json.side_effect = ca_test_common.exit_json 51 | 52 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 53 | ceph_osd_flag.main() 54 | 55 | result = result.value.args[0] 56 | assert not result['changed'] 57 | assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag] 58 | assert result['rc'] == 0 59 | assert not result['stdout'] 60 | assert not result['stderr'] 61 | 62 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 63 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 64 | def test_with_failure(self, m_run_command, m_exit_json): 65 | ca_test_common.set_module_args({ 66 | 'name': fake_flag 67 | }) 68 | m_exit_json.side_effect = ca_test_common.exit_json 69 | stdout = '' 70 | stderr = 'Error EINVAL: invalid command' 71 | rc = 22 72 | m_run_command.return_value = rc, stdout, stderr 73 | 74 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 75 | ceph_osd_flag.main() 76 | 77 | result = result.value.args[0] 78 | assert result['changed'] 79 | assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', 
fake_cluster, 'osd', 'set', fake_flag] 80 | assert result['rc'] == rc 81 | assert result['stderr'] == stderr 82 | 83 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 84 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 85 | def test_set_flag(self, m_run_command, m_exit_json): 86 | ca_test_common.set_module_args({ 87 | 'name': fake_flag, 88 | }) 89 | m_exit_json.side_effect = ca_test_common.exit_json 90 | stdout = '' 91 | stderr = '{} is set'.format(fake_flag) 92 | rc = 0 93 | m_run_command.return_value = rc, stdout, stderr 94 | 95 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 96 | ceph_osd_flag.main() 97 | 98 | result = result.value.args[0] 99 | assert result['changed'] 100 | assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag] 101 | assert result['rc'] == rc 102 | assert result['stderr'] == stderr 103 | assert result['stdout'] == stdout 104 | 105 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 106 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 107 | def test_unset_flag(self, m_run_command, m_exit_json): 108 | ca_test_common.set_module_args({ 109 | 'name': fake_flag, 110 | 'state': 'absent' 111 | }) 112 | m_exit_json.side_effect = ca_test_common.exit_json 113 | stdout = '' 114 | stderr = '{} is unset'.format(fake_flag) 115 | rc = 0 116 | m_run_command.return_value = rc, stdout, stderr 117 | 118 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 119 | ceph_osd_flag.main() 120 | 121 | result = result.value.args[0] 122 | assert result['changed'] 123 | assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'unset', fake_flag] 124 | assert result['rc'] == rc 125 | assert result['stderr'] == stderr 126 | assert result['stdout'] == stdout 127 | 128 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 129 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 130 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 131 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 132 | def test_with_container(self, m_run_command, m_exit_json): 133 | ca_test_common.set_module_args({ 134 | 'name': fake_flag, 135 | }) 136 | m_exit_json.side_effect = ca_test_common.exit_json 137 | stdout = '' 138 | stderr = '{} is set'.format(fake_flag) 139 | rc = 0 140 | m_run_command.return_value = rc, stdout, stderr 141 | 142 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 143 | ceph_osd_flag.main() 144 | 145 | result = result.value.args[0] 146 | assert result['changed'] 147 | assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host', 148 | '-v', '/etc/ceph:/etc/ceph:z', 149 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 150 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 151 | '--entrypoint=ceph', fake_container_image, 152 | '-n', fake_user, '-k', fake_keyring, 153 | '--cluster', fake_cluster, 'osd', 'set', fake_flag] 154 | assert result['rc'] == rc 155 | assert result['stderr'] == stderr 156 | assert result['stdout'] == stdout 157 | -------------------------------------------------------------------------------- /plugins/modules/ceph_key_info.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018, Red Hat, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from __future__ import absolute_import, division, print_function 16 | __metaclass__ = type 17 | 18 | 19 | ANSIBLE_METADATA = { 20 | 'metadata_version': '1.1', 21 | 'status': ['preview'], 22 | 'supported_by': 'community' 23 | } 24 | 25 | DOCUMENTATION = ''' 26 | --- 27 | module: ceph_key_info 28 | 29 | author: Teoman ONAY (@asM0deuz) 30 | 31 | short_description: Manage Cephx key(s) 32 | 33 | version_added: "1.1.0" 34 | 35 | description: 36 | - the module will return in a json format the description of a given keyring. 37 | options: 38 | cluster: 39 | description: 40 | - The ceph cluster name. 41 | required: false 42 | type: str 43 | default: ceph 44 | name: 45 | description: 46 | - name of the CephX key 47 | type: str 48 | required: true 49 | user: 50 | description: 51 | - entity used to perform operation. It corresponds to the -n option (--name) 52 | type: str 53 | required: false 54 | default: 'client.admin' 55 | user_key: 56 | description: 57 | - the path to the keyring corresponding to the user being used. It corresponds to the -k option (--keyring) 58 | type: str 59 | required: false 60 | output_format: 61 | description: 62 | - The key output format when retrieving the information of an entity. 63 | type: str 64 | choices: ['json', 'plain', 'xml', 'yaml'] 65 | required: false 66 | default: json 67 | mode: 68 | description: 69 | - N/A 70 | type: raw 71 | owner: 72 | description: 73 | - N/A 74 | type: str 75 | group: 76 | description: 77 | - N/A 78 | type: str 79 | seuser: 80 | description: 81 | - N/A 82 | type: str 83 | serole: 84 | description: 85 | - N/A 86 | type: str 87 | selevel: 88 | description: 89 | - N/A 90 | type: str 91 | setype: 92 | description: 93 | - N/A 94 | type: str 95 | attributes: 96 | description: 97 | - N/A 98 | type: str 99 | aliases: 100 | - attr 101 | unsafe_writes: 102 | description: 103 | - N/A 104 | type: bool 105 | default: false 106 | ''' 107 | 108 | EXAMPLES = ''' 109 | - name: info cephx key 110 | ceph_key_info: 111 | name: "my_key" 112 | 113 | - name: info cephx admin key (plain) 114 | ceph_key_info: 115 | name: client.admin 116 | output_format: plain 117 | register: client_admin_key 118 | ''' 119 | 120 | RETURN = '''# ''' 121 | 122 | 123 | try: 124 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import generate_cmd, \ 125 | is_containerized 126 | except ImportError: 127 | from module_utils.ceph_common import generate_cmd, is_containerized 128 | 129 | try: 130 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_key_common import exec_commands 131 | except ImportError: 132 | from module_utils.ceph_key_common import exec_commands 133 | 134 | import os 135 | import datetime 136 | from ansible.module_utils.basic import AnsibleModule 137 | 138 | 139 | def info_key(cluster, name, user, user_key, output_format, container_image=None): # noqa: E501 140 | ''' 141 | Get information about a CephX key 142 | ''' 143 | 144 | cmd_list = [] 145 | 146 | args = [ 147 | 'get', 148 | name, 149 | '-f', 150 | output_format, 151 | ] 152 | 153 | 
cmd_list.append(generate_cmd(sub_cmd=['auth'], 154 | args=args, 155 | cluster=cluster, 156 | user=user, 157 | user_key=user_key, 158 | container_image=container_image)) 159 | 160 | return cmd_list 161 | 162 | 163 | def run_module(): 164 | module_args = dict( 165 | cluster=dict(type='str', required=False, default='ceph'), 166 | name=dict(type='str', required=True), 167 | user=dict(type='str', required=False, default='client.admin'), 168 | user_key=dict(type='str', required=False, default=None, no_log=False), 169 | output_format=dict(type='str', required=False, default='json', choices=['json', 'plain', 'xml', 'yaml']) # noqa: E501 170 | ) 171 | 172 | module = AnsibleModule( 173 | argument_spec=module_args, 174 | supports_check_mode=True, 175 | add_file_common_args=True, 176 | ) 177 | 178 | # Gather module parameters in variables 179 | name = module.params.get('name') 180 | cluster = module.params.get('cluster') 181 | user = module.params.get('user') 182 | user_key = module.params.get('user_key') 183 | output_format = module.params.get('output_format') 184 | 185 | changed = False 186 | cmd = '' 187 | rc = 0 188 | out = '' 189 | err = '' 190 | 191 | result = dict( 192 | changed=changed, 193 | stdout='', 194 | stderr='', 195 | rc=0, 196 | start='', 197 | end='', 198 | delta='', 199 | ) 200 | 201 | if module.check_mode: 202 | module.exit_json(**result) 203 | 204 | startd = datetime.datetime.now() 205 | 206 | # will return either the image name or None 207 | container_image = is_containerized() 208 | 209 | if not user_key: 210 | user_key_filename = '{}.{}.keyring'.format(cluster, user) 211 | user_key_dir = '/etc/ceph' 212 | user_key_path = os.path.join(user_key_dir, user_key_filename) 213 | else: 214 | user_key_path = user_key 215 | 216 | rc, cmd, out, err = exec_commands( 217 | module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa: E501 218 | 219 | endd = datetime.datetime.now() 220 | delta = endd - startd 221 | 222 | result = dict( 223 | cmd=cmd, 224 | start=str(startd), 225 | end=str(endd), 226 | delta=str(delta), 227 | rc=rc, 228 | stdout=out.rstrip("\r\n"), 229 | stderr=err.rstrip("\r\n"), 230 | changed=changed, 231 | ) 232 | 233 | if rc != 0: 234 | module.fail_json(msg='non-zero return code', **result) 235 | 236 | module.exit_json(**result) 237 | 238 | 239 | def main(): 240 | run_module() 241 | 242 | 243 | if __name__ == '__main__': 244 | main() 245 | -------------------------------------------------------------------------------- /tests/unit/modules/test_ceph_volume_simple_scan.py: -------------------------------------------------------------------------------- 1 | from mock.mock import patch 2 | import os 3 | import pytest 4 | from ansible_collections.ceph.automation.tests.unit.modules import ca_test_common 5 | from ansible_collections.ceph.automation.plugins.modules import ceph_volume_simple_scan 6 | 7 | fake_cluster = 'ceph' 8 | fake_container_binary = 'podman' 9 | fake_container_image = 'quay.io/ceph/daemon:latest' 10 | fake_path = '/var/lib/ceph/osd/ceph-0' 11 | 12 | 13 | class TestCephVolumeSimpleScanModule(object): 14 | 15 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 16 | def test_with_check_mode(self, m_exit_json): 17 | ca_test_common.set_module_args({ 18 | '_ansible_check_mode': True 19 | }) 20 | m_exit_json.side_effect = ca_test_common.exit_json 21 | 22 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 23 | ceph_volume_simple_scan.main() 24 | 25 | result = result.value.args[0] 26 | assert not result['changed'] 
27 | assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] 28 | assert result['rc'] == 0 29 | assert not result['stdout'] 30 | assert not result['stderr'] 31 | 32 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 33 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 34 | def test_with_failure(self, m_run_command, m_exit_json): 35 | ca_test_common.set_module_args({ 36 | }) 37 | m_exit_json.side_effect = ca_test_common.exit_json 38 | stdout = '' 39 | stderr = 'error' 40 | rc = 2 41 | m_run_command.return_value = rc, stdout, stderr 42 | 43 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 44 | ceph_volume_simple_scan.main() 45 | 46 | result = result.value.args[0] 47 | assert result['changed'] 48 | assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] 49 | assert result['rc'] == rc 50 | assert result['stderr'] == stderr 51 | 52 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 53 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 54 | def test_scan_all_osds(self, m_run_command, m_exit_json): 55 | ca_test_common.set_module_args({ 56 | }) 57 | m_exit_json.side_effect = ca_test_common.exit_json 58 | stdout = '' 59 | stderr = '' 60 | rc = 0 61 | m_run_command.return_value = rc, stdout, stderr 62 | 63 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 64 | ceph_volume_simple_scan.main() 65 | 66 | result = result.value.args[0] 67 | assert result['changed'] 68 | assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] 69 | assert result['rc'] == rc 70 | assert result['stderr'] == stderr 71 | assert result['stdout'] == stdout 72 | 73 | @patch.object(os.path, 'exists', return_value=True) 74 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 75 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 76 | def test_scan_path_exists(self, m_run_command, m_exit_json, m_os_path): 77 | ca_test_common.set_module_args({ 78 | 'path': fake_path 79 | }) 80 | m_exit_json.side_effect = ca_test_common.exit_json 81 | stdout = '' 82 | stderr = '' 83 | rc = 0 84 | m_run_command.return_value = rc, stdout, stderr 85 | 86 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 87 | ceph_volume_simple_scan.main() 88 | 89 | result = result.value.args[0] 90 | assert result['changed'] 91 | assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', fake_path] 92 | assert result['rc'] == rc 93 | assert result['stderr'] == stderr 94 | assert result['stdout'] == stdout 95 | 96 | @patch.object(os.path, 'exists', return_value=False) 97 | @patch('ansible.module_utils.basic.AnsibleModule.fail_json') 98 | def test_scan_path_not_exists(self, m_fail_json, m_os_path): 99 | ca_test_common.set_module_args({ 100 | 'path': fake_path 101 | }) 102 | m_fail_json.side_effect = ca_test_common.fail_json 103 | 104 | with pytest.raises(ca_test_common.AnsibleFailJson) as result: 105 | ceph_volume_simple_scan.main() 106 | 107 | result = result.value.args[0] 108 | assert result['msg'] == '{} does not exist'.format(fake_path) 109 | assert result['rc'] == 1 110 | 111 | @patch.object(os.path, 'exists', return_value=True) 112 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 113 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 114 | def test_scan_path_stdout_force(self, m_run_command, m_exit_json, m_os_path): 115 | ca_test_common.set_module_args({ 116 | 'path': fake_path, 117 | 'force': True, 118 | 
'stdout': True 119 | }) 120 | m_exit_json.side_effect = ca_test_common.exit_json 121 | stdout = '' 122 | stderr = '' 123 | rc = 0 124 | m_run_command.return_value = rc, stdout, stderr 125 | 126 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 127 | ceph_volume_simple_scan.main() 128 | 129 | result = result.value.args[0] 130 | assert result['changed'] 131 | assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', '--force', '--stdout', fake_path] 132 | assert result['rc'] == rc 133 | assert result['stderr'] == stderr 134 | assert result['stdout'] == stdout 135 | 136 | @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) 137 | @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 138 | @patch('ansible.module_utils.basic.AnsibleModule.exit_json') 139 | @patch('ansible.module_utils.basic.AnsibleModule.run_command') 140 | def test_scan_with_container(self, m_run_command, m_exit_json): 141 | ca_test_common.set_module_args({ 142 | }) 143 | m_exit_json.side_effect = ca_test_common.exit_json 144 | stdout = '' 145 | stderr = '' 146 | rc = 0 147 | m_run_command.return_value = rc, stdout, stderr 148 | 149 | with pytest.raises(ca_test_common.AnsibleExitJson) as result: 150 | ceph_volume_simple_scan.main() 151 | 152 | result = result.value.args[0] 153 | assert result['changed'] 154 | assert result['cmd'] == [fake_container_binary, 155 | 'run', '--rm', '--privileged', 156 | '--ipc=host', '--net=host', 157 | '-v', '/etc/ceph:/etc/ceph:z', 158 | '-v', '/var/lib/ceph/:/var/lib/ceph/:z', 159 | '-v', '/var/log/ceph/:/var/log/ceph/:z', 160 | '-v', '/run/lvm/:/run/lvm/', 161 | '-v', '/run/lock/lvm/:/run/lock/lvm/', 162 | '--entrypoint=ceph-volume', fake_container_image, 163 | '--cluster', fake_cluster, 'simple', 'scan'] 164 | assert result['rc'] == rc 165 | assert result['stderr'] == stderr 166 | assert result['stdout'] == stdout 167 | -------------------------------------------------------------------------------- /plugins/modules/ceph_authtool.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright 2018, Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | 18 | from __future__ import absolute_import, division, print_function 19 | __metaclass__ = type 20 | 21 | ANSIBLE_METADATA = { 22 | 'metadata_version': '1.1', 23 | 'status': ['preview'], 24 | 'supported_by': 'community' 25 | } 26 | 27 | DOCUMENTATION = ''' 28 | --- 29 | module: ceph_authtool 30 | short_description: ceph keyring manipulation 31 | version_added: "1.1.0" 32 | description: 33 | - Create, view, and modify a Ceph keyring file. 
34 | options: 35 | name: 36 | description: 37 | - specify entityname to operate on 38 | type: str 39 | required: false 40 | create_keyring: 41 | description: 42 | - will create a new keyring, overwriting any existing keyringfile 43 | type: bool 44 | required: false 45 | default: false 46 | gen_key: 47 | description: 48 | - will generate a new secret key for the specified entityname 49 | type: bool 50 | required: false 51 | default: false 52 | add_key: 53 | description: 54 | - will add an encoded key to the keyring 55 | type: str 56 | required: false 57 | import_keyring: 58 | description: 59 | - will import the content of a given keyring to the keyringfile 60 | type: str 61 | required: false 62 | caps: 63 | description: 64 | - will set the capability for given subsystem 65 | type: dict 66 | required: false 67 | path: 68 | description: 69 | - where the caps file will be created 70 | type: str 71 | required: true 72 | mode: 73 | description: 74 | - N/A 75 | type: raw 76 | owner: 77 | description: 78 | - N/A 79 | type: str 80 | group: 81 | description: 82 | - N/A 83 | type: str 84 | seuser: 85 | description: 86 | - N/A 87 | type: str 88 | serole: 89 | description: 90 | - N/A 91 | type: str 92 | selevel: 93 | description: 94 | - N/A 95 | type: str 96 | setype: 97 | description: 98 | - N/A 99 | type: str 100 | attributes: 101 | description: 102 | - N/A 103 | type: str 104 | aliases: 105 | - attr 106 | unsafe_writes: 107 | description: 108 | - N/A 109 | type: bool 110 | default: false 111 | author: 112 | - guillaume abrioux (@guits) 113 | ''' 114 | 115 | EXAMPLES = ''' 116 | - name: Create admin keyring 117 | ceph_authtool: 118 | name: client.admin 119 | path: "/etc/ceph/ceph.client.admin.keyring" 120 | owner: 'ceph' 121 | group: 'ceph' 122 | mode: "0400" 123 | caps: 124 | mon: allow * 125 | mgr: allow * 126 | osd: allow * 127 | mds: allow * 128 | create_keyring: true 129 | gen_key: true 130 | add_key: admin_secret 131 | ''' 132 | 133 | RETURN = '''# ''' 134 | 135 | from ansible.module_utils.basic import AnsibleModule # type: ignore 136 | try: 137 | from ansible_collections.ceph.automation.plugins.module_utils.ceph_common import container_exec, is_containerized # type: ignore 138 | except ImportError: 139 | from module_utils.ceph_common import container_exec, is_containerized 140 | 141 | import datetime 142 | import os 143 | 144 | 145 | class KeyringExists(Exception): 146 | pass 147 | 148 | 149 | def build_cmd(create_keyring=False, 150 | gen_key=False, 151 | import_keyring=None, 152 | caps=None, 153 | name=None, 154 | path=None, 155 | container_image=None): 156 | 157 | auth_tool_binary: str = 'ceph-authtool' 158 | 159 | if container_image: 160 | c = container_exec(auth_tool_binary, 161 | container_image) 162 | else: 163 | c = [auth_tool_binary] 164 | 165 | if name: 166 | c.extend(['-n', name]) 167 | if create_keyring: 168 | if os.path.exists(path): 169 | raise KeyringExists 170 | c.append('-C') 171 | if gen_key: 172 | c.append('-g') 173 | if caps: 174 | for k, v in caps.items(): 175 | c.extend(['--cap'] + [k] + [v]) 176 | 177 | c.append(path) 178 | 179 | if import_keyring: 180 | c.extend(['--import-keyring', import_keyring]) 181 | 182 | return c 183 | 184 | 185 | def run_module(): 186 | module_args = dict( 187 | name=dict(type='str', required=False), 188 | create_keyring=dict(type='bool', required=False, default=False), 189 | gen_key=dict(type='bool', required=False, default=False), 190 | add_key=dict(type='str', required=False, default=None, no_log=False), 191 | import_keyring=dict(type='str', 
required=False, default=None, no_log=False), 192 | caps=dict(type='dict', required=False, default=None), 193 | path=dict(type='str', required=True), 194 | ) 195 | 196 | module = AnsibleModule( 197 | argument_spec=module_args, 198 | supports_check_mode=True, 199 | add_file_common_args=True, 200 | ) 201 | 202 | cmd = [] 203 | changed = False 204 | 205 | result = dict( 206 | changed=changed, 207 | stdout='', 208 | stderr='', 209 | rc=0, 210 | start='', 211 | end='', 212 | delta='', 213 | ) 214 | 215 | if module.check_mode: 216 | module.exit_json(**result) 217 | 218 | startd = datetime.datetime.now() 219 | 220 | # will return either the image name or None 221 | container_image = is_containerized() 222 | try: 223 | cmd = build_cmd(**module.params, container_image=container_image) 224 | except KeyringExists: 225 | rc = 0 226 | out = f"{module.params['path']} already exists. Skipping" 227 | err = "" 228 | else: 229 | rc, out, err = module.run_command(cmd) 230 | if rc == 0: 231 | changed = True 232 | 233 | endd = datetime.datetime.now() 234 | delta = endd - startd 235 | 236 | result = dict( 237 | cmd=cmd, 238 | start=str(startd), 239 | end=str(endd), 240 | delta=str(delta), 241 | rc=rc, 242 | stdout=out.rstrip("\r\n"), 243 | stderr=err.rstrip("\r\n"), 244 | changed=changed, 245 | ) 246 | if rc != 0: 247 | module.fail_json(msg='non-zero return code', **result) 248 | 249 | # file_args = module.load_file_common_arguments(module.params) 250 | # module.set_fs_attributes_if_different(file_args, False) 251 | module.exit_json(**result) 252 | 253 | 254 | def main(): 255 | run_module() 256 | 257 | 258 | if __name__ == '__main__': 259 | main() 260 | --------------------------------------------------------------------------------