├── .ansible-lint ├── .github ├── CODEOWNERS └── workflows │ ├── bump-changelogs.yml │ ├── publish-collection.yml │ └── test.yml ├── .gitignore ├── CHANGELOG.rst ├── LICENSE ├── README.md ├── changelogs ├── changelog.yaml ├── config.yaml └── fragments │ └── .gitkeep ├── galaxy.yml ├── meta └── runtime.yml ├── plugins ├── module_utils │ └── cephadm_common.py └── modules │ ├── cephadm_crush_rule.py │ ├── cephadm_ec_profile.py │ ├── cephadm_key.py │ └── cephadm_pool.py ├── roles ├── cephadm │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ ├── bootstrap.yml │ │ ├── destroy.yml │ │ ├── main.yml │ │ ├── openstack.yml │ │ ├── osds.yml │ │ ├── osds_spec.yml │ │ ├── pkg_debian.yml │ │ ├── pkg_redhat.yml │ │ ├── prechecks.yml │ │ ├── prereqs.yml │ │ └── prereqs_bootstrap.yml │ ├── templates │ │ └── cluster.yml.j2 │ └── vars │ │ └── main.yml ├── commands │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── crush_rules │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── ec_profiles │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── enter_maintenance │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── enter.yml │ │ └── main.yml ├── exit_maintenance │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── exit.yml │ │ └── main.yml ├── keys │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml └── pools │ ├── README.md │ ├── defaults │ └── main.yml │ └── tasks │ └── main.yml ├── test-requirements.txt └── tests ├── sanity ├── ignore-2.14.txt ├── ignore-2.15.txt ├── ignore-2.16.txt ├── ignore-2.17.txt ├── ignore-2.18.txt └── ignore.txt └── unit └── modules ├── __init__.py ├── cephadm_test_common.py └── test_cephadm_crush_rule.py /.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | profile: shared 3 | 4 | exclude_paths: 5 | - changelogs/ 6 | - .github/ 7 | 8 | skip_list: 9 | - var-naming[no-role-prefix] 10 | 11 | warn_list: 12 | - meta-runtime[unsupported-version] 13 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @stackhpc/ansible 2 | -------------------------------------------------------------------------------- /.github/workflows/bump-changelogs.yml: -------------------------------------------------------------------------------- 1 | name: Update changelogs on galaxy version bump 2 | on: 3 | push: 4 | paths: 5 | - galaxy.yml 6 | branches: 7 | - antsibull_changelogs # change before merge 8 | jobs: 9 | bump-changelogs: 10 | uses: stackhpc/.github/.github/workflows/antsibull-release.yml@update_changelogs # todo: change to main once merged 11 | -------------------------------------------------------------------------------- /.github/workflows/publish-collection.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Publish Ansible Collection 3 | 'on': 4 | push: 5 | tags: 6 | - "v?[0-9]+.[0-9]+.[0-9]+" 7 | workflow_dispatch: 8 | jobs: 9 | publish_collection: 10 | uses: stackhpc/.github/.github/workflows/publish-collection.yml@main 11 | secrets: 12 | GALAXY_API_KEY: ${{ secrets.GALAXY_API_KEY }} 13 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Tests 3 | 4 
| # Controls when the action will run. 5 | on: 6 | pull_request: 7 | push: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | steps: 17 | # Checks-out the repository under $GITHUB_WORKSPACE, so it's accessible to the job 18 | - uses: actions/checkout@v3 19 | 20 | - name: Install dependencies 21 | run: | 22 | python -m pip install --upgrade pip 23 | pip install -r test-requirements.txt 24 | 25 | - name: Linting code 26 | run: | 27 | ansible-lint -v --force-color 28 | antsibull-changelog lint 29 | 30 | ansible-test: 31 | runs-on: ubuntu-latest 32 | strategy: 33 | fail-fast: false 34 | steps: 35 | # Checks-out the repository under $GITHUB_WORKSPACE, so it's accessible to the job 36 | - uses: actions/checkout@v3 37 | with: 38 | path: ansible_collections/stackhpc/cephadm 39 | 40 | - name: Install dependencies 41 | working-directory: ansible_collections/stackhpc/cephadm 42 | run: | 43 | python -m pip install --upgrade pip 44 | pip install -r test-requirements.txt 45 | 46 | - name: Run ansible-test sanity to download docker images 47 | working-directory: ansible_collections/stackhpc/cephadm 48 | run: | 49 | ansible-test sanity --docker --prime-containers 50 | 51 | - name: Run ansible-test sanity 52 | working-directory: ansible_collections/stackhpc/cephadm 53 | run: | 54 | ansible-test sanity --verbose --docker --junit 55 | 56 | - name: Run ansible-test units 57 | working-directory: ansible_collections/stackhpc/cephadm 58 | run: | 59 | ansible-test units --verbose --docker --coverage --requirements 60 | ansible-test coverage xml --requirements 61 | ansible-test coverage html --requirements 62 | 63 | - name: Print coverage report 64 | working-directory: ansible_collections/stackhpc/cephadm 65 | run: | 66 | ansible-test coverage report 67 | 68 | - name: Publish Test Report 69 | uses: mikepenz/action-junit-report@v3 70 | if: success() || failure() # always run even if the previous step fails 71 | with: 72 | report_paths: '**/tests/output/*/*.xml' 73 | 74 | - name: Upload HTML report 75 | uses: actions/upload-artifact@v4 76 | with: 77 | name: coverage-html 78 | path: ansible_collections/stackhpc/cephadm/tests/output/reports/coverage/ 79 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # antsibull-changelog cache 2 | /changelogs/.plugin-cache.yaml 3 | 4 | # ansible-test output 5 | /tests/output 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | stackhpc.cephadm Release Notes 3 | ============================== 4 | 5 | .. contents:: Topics 6 | 7 | v1.19.1 8 | ======= 9 | 10 | Bugfixes 11 | -------- 12 | 13 | - pools - cephadm_pool tasks now correctly run with sudo 14 | 15 | v1.16.0 16 | ======= 17 | 18 | Release Summary 19 | --------------- 20 | 21 | Fix idempotency issue in cephadm_keys plugin. `cephadm_keys` no 22 | longer generates keyring files on Ceph hosts, and additional tasks 23 | are required to write keyring files to disk - see the cephadm_keys 24 | README.md for further details. 
25 | 26 | Minor Changes 27 | ------------- 28 | 29 | - Deprecate `fetch_inital_keys` functionality in cephadm_keys plugin 30 | - Deprecate `generate_keys` functionality in cephadm_keys plugin 31 | - Fix issue with idempotency in cephadm_keys plugin, by no longer generating user keyring files on Ceph hosts. 32 | 33 | v1.13.0 34 | ======= 35 | 36 | Release Summary 37 | --------------- 38 | 39 | Minor release adding support for choosing plugin in EC profiles 40 | 41 | Minor Changes 42 | ------------- 43 | 44 | - Add support for choosing plugin in EC profiles 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # StackHPC cephadm collection
2 |
3 | [![Tests](https://github.com/stackhpc/ansible-collection-cephadm/actions/workflows/test.yml/badge.svg)](https://github.com/stackhpc/ansible-collection-cephadm/actions/workflows/test.yml) [![Publish Ansible Collection](https://github.com/stackhpc/ansible-collection-cephadm/actions/workflows/publish-collection.yml/badge.svg)](https://github.com/stackhpc/ansible-collection-cephadm/actions/workflows/publish-collection.yml)
4 |
5 | This repo contains the `stackhpc.cephadm` Ansible Collection. The collection includes modules and plugins supported by StackHPC for cephadm-based deployments.
6 |
7 | ## Tested with Ansible
8 |
9 | Tested with the Ansible 2.9 and 2.10 releases.
10 |
11 | ## Included content
12 |
13 | Roles:
14 | * [cephadm](roles/cephadm/README.md) for deployment/bootstrapping
15 | * [commands](roles/commands/README.md) for running arbitrary commands
16 | * [crush_rules](roles/crush_rules/README.md) for defining CRUSH rules
17 | * [ec_profiles](roles/ec_profiles/README.md) for defining EC profiles
18 | * [enter_maintenance](roles/enter_maintenance/README.md) for placing hosts into maintenance
19 | * [exit_maintenance](roles/exit_maintenance/README.md) for removing hosts from maintenance
20 | * [keys](roles/keys/README.md) for defining auth keys
21 | * [pools](roles/pools/README.md) for defining pools
22 |
23 | ## Using this collection
24 |
25 | Before using the collection, you need to install it with the `ansible-galaxy` CLI:
26 |
27 | ansible-galaxy collection install stackhpc.cephadm
28 |
29 | You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:
30 |
31 | ```yaml
32 | collections:
33 | - name: stackhpc.cephadm
34 | ```
35 |
36 | See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
37 |
38 | ## Release notes handling
39 |
40 | See [antsibull-changelog docs](https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelogs.rst) for instructions on how to deal with release notes.
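
For example, a release note starts life as a small YAML fragment under `changelogs/fragments/`. A minimal sketch (the filename is illustrative; the section keys must be ones listed in `changelogs/config.yaml`, such as `bugfixes` or `minor_changes`):

```yaml
# changelogs/fragments/pool-sudo-fix.yml (hypothetical filename)
bugfixes:
  - pools - cephadm_pool tasks now correctly run with sudo
```

When a release is cut, antsibull-changelog merges the fragments into `changelogs/changelog.yaml`, regenerates `CHANGELOG.rst`, and removes the fragment files (`keep_fragments: false` in `changelogs/config.yaml`).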
41 | 42 | ## More information 43 | 44 | - [Ansible Collection overview](https://github.com/ansible-collections/overview) 45 | - [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html) 46 | - [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html) 47 | - [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) 48 | 49 | ## Licensing 50 | 51 | Apache License Version 2.0 52 | -------------------------------------------------------------------------------- /changelogs/changelog.yaml: -------------------------------------------------------------------------------- 1 | ancestor: null 2 | releases: 3 | 1.13.0: 4 | changes: 5 | minor_changes: 6 | - Add support for choosing plugin in EC profiles 7 | release_summary: 'Minor release adding support for choosing plugin in EC profiles 8 | 9 | ' 10 | release_date: '2023-02-28' 11 | 1.16.0: 12 | changes: 13 | minor_changes: 14 | - Deprecate `fetch_inital_keys` functionality in cephadm_keys plugin 15 | - Deprecate `generate_keys` functionality in cephadm_keys plugin 16 | - Fix issue with idempotency in cephadm_keys plugin, by no longer generating 17 | user keyring files on Ceph hosts. 18 | release_summary: 'Fix idempotency issue in cephadm_keys plugin. `cephadm_keys` 19 | no 20 | 21 | longer generates keyring files on Ceph hosts, and additional tasks 22 | 23 | are required to write keyring files to disk - see the cephadm_keys 24 | 25 | README.md for further details. 26 | 27 | ' 28 | release_date: '2024-07-28' 29 | 1.19.1: 30 | changes: 31 | bugfixes: 32 | - pools - cephadm_pool tasks now correctly run with sudo 33 | fragments: 34 | - sudo-fix.yml 35 | release_date: '2025-01-08' 36 | -------------------------------------------------------------------------------- /changelogs/config.yaml: -------------------------------------------------------------------------------- 1 | changelog_filename_template: ../CHANGELOG.rst 2 | changelog_filename_version_depth: 0 3 | changes_file: changelog.yaml 4 | changes_format: combined 5 | ignore_other_fragment_extensions: true 6 | keep_fragments: false 7 | mention_ancestor: true 8 | new_plugins_after_name: removed_features 9 | notesdir: fragments 10 | prelude_section_name: release_summary 11 | prelude_section_title: Release Summary 12 | sanitize_changelog: true 13 | sections: 14 | - - major_changes 15 | - Major Changes 16 | - - minor_changes 17 | - Minor Changes 18 | - - breaking_changes 19 | - Breaking Changes / Porting Guide 20 | - - deprecated_features 21 | - Deprecated Features 22 | - - removed_features 23 | - Removed Features (previously deprecated) 24 | - - security_fixes 25 | - Security Fixes 26 | - - bugfixes 27 | - Bugfixes 28 | - - known_issues 29 | - Known Issues 30 | title: stackhpc.cephadm 31 | trivial_section_name: trivial 32 | use_fqcn: true 33 | -------------------------------------------------------------------------------- /changelogs/fragments/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackhpc/ansible-collection-cephadm/0035b8ea95843ab8edcb818e8e87085157866402/changelogs/fragments/.gitkeep -------------------------------------------------------------------------------- /galaxy.yml: -------------------------------------------------------------------------------- 1 | namespace: "stackhpc" 2 | name: "cephadm" 3 | version: "1.19.3" 4 | readme: "README.md" 5 | authors: 6 | - "Michal Nasiadka" 7 | - "Bartosz Bezak" 8 | - 
"John Garbutt" 9 | - "Mark Goddard" 10 | license: 11 | - "Apache-2.0" 12 | tags: 13 | - cephadm 14 | - infrastructure 15 | description: "Ansible roles to create and manage cephadm-based Ceph deployments" 16 | repository: "https://github.com/stackhpc/ansible-collection-cephadm" 17 | -------------------------------------------------------------------------------- /meta/runtime.yml: -------------------------------------------------------------------------------- 1 | requires_ansible: ">=2.9" 2 | -------------------------------------------------------------------------------- /plugins/module_utils/cephadm_common.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020, Red Hat, Inc. 2 | # Copyright 2021, StackHPC, Ltd. 3 | # NOTE: Files adapted from github.com/ceph/ceph-ansible 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from __future__ import (absolute_import, division, print_function) 18 | __metaclass__ = type 19 | 20 | import datetime 21 | 22 | 23 | def generate_ceph_cmd(sub_cmd, args): 24 | ''' 25 | Generate 'ceph' command line to execute 26 | ''' 27 | 28 | cmd = [ 29 | 'cephadm', 30 | '--timeout', 31 | '60', 32 | 'shell', 33 | '--', 34 | 'ceph', 35 | ] 36 | cmd.extend(sub_cmd + args) 37 | 38 | return cmd 39 | 40 | 41 | def exec_command(module, cmd, stdin=None): 42 | ''' 43 | Execute command(s) 44 | ''' 45 | 46 | binary_data = False 47 | if stdin: 48 | binary_data = True 49 | rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data) 50 | 51 | return rc, cmd, out, err 52 | 53 | 54 | def exit_module(module, out, rc, cmd, err, startd, changed=False): 55 | endd = datetime.datetime.now() 56 | delta = endd - startd 57 | 58 | result = dict( 59 | cmd=cmd, 60 | start=str(startd), 61 | end=str(endd), 62 | delta=str(delta), 63 | rc=rc, 64 | stdout=out.rstrip("\r\n"), 65 | stderr=err.rstrip("\r\n"), 66 | changed=changed, 67 | ) 68 | module.exit_json(**result) 69 | 70 | 71 | def fatal(message, module): 72 | ''' 73 | Report a fatal error and exit 74 | ''' 75 | 76 | if module: 77 | module.fail_json(msg=message, rc=1) 78 | else: 79 | raise Exception(message) 80 | -------------------------------------------------------------------------------- /plugins/modules/cephadm_crush_rule.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | # Copyright 2020, Red Hat, Inc. 4 | # Copyright 2021, StackHPC, Ltd. 5 | # NOTE: Files adapted from github.com/ceph/ceph-ansible 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 
9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | from __future__ import absolute_import, division, print_function 20 | __metaclass__ = type 21 | 22 | 23 | DOCUMENTATION = r''' 24 | --- 25 | module: cephadm_crush_rule 26 | short_description: Manage Ceph Crush Replicated/Erasure Rule 27 | version_added: "1.4.0" 28 | description: 29 | - Manage Ceph Crush rule(s) creation, deletion and updates. 30 | author: 31 | - Dimitri Savineau 32 | - Michal Nasiadka 33 | options: 34 | name: 35 | description: 36 | - name of the Ceph Crush rule. 37 | required: true 38 | type: str 39 | state: 40 | description: 41 | If 'present' is used, the module creates a rule if it doesn't 42 | exist or update it if it already exists. 43 | If 'absent' is used, the module will simply delete the rule. 44 | If 'info' is used, the module will return all details about the 45 | existing rule (json formatted). 46 | required: false 47 | choices: ['present', 'absent', 'info'] 48 | default: present 49 | type: str 50 | rule_type: 51 | description: 52 | - The ceph CRUSH rule type. 53 | required: false 54 | choices: ['replicated', 'erasure'] 55 | type: str 56 | bucket_root: 57 | description: 58 | - The ceph bucket root for replicated rule. 59 | required: false 60 | type: str 61 | bucket_type: 62 | description: 63 | - The ceph bucket type for replicated rule. 64 | required: false 65 | choices: ['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod', 66 | 'room', 'datacenter', 'zone', 'region', 'root'] 67 | type: str 68 | device_class: 69 | description: 70 | - The ceph device class for replicated rule. 71 | required: false 72 | type: str 73 | profile: 74 | description: 75 | - The ceph erasure profile for erasure rule. 
76 | required: false 77 | type: str 78 | ''' 79 | 80 | EXAMPLES = ''' 81 | - name: create a Ceph Crush replicated rule 82 | cephadm_crush_rule: 83 | name: foo 84 | bucket_root: default 85 | bucket_type: host 86 | device_class: ssd 87 | rule_type: replicated 88 | 89 | - name: create a Ceph Crush erasure rule 90 | cephadm_crush_rule: 91 | name: foo 92 | profile: bar 93 | rule_type: erasure 94 | 95 | - name: get a Ceph Crush rule information 96 | cephadm_crush_rule: 97 | name: foo 98 | state: info 99 | 100 | - name: delete a Ceph Crush rule 101 | cephadm_crush_rule: 102 | name: foo 103 | state: absent 104 | ''' 105 | 106 | RETURN = '''# ''' 107 | 108 | 109 | from ansible.module_utils.basic import AnsibleModule 110 | from ansible_collections.stackhpc.cephadm.plugins.module_utils.cephadm_common \ 111 | import generate_ceph_cmd, exec_command, exit_module 112 | 113 | import datetime 114 | import json 115 | 116 | 117 | def create_rule(module, container_image=None): 118 | ''' 119 | Create a new crush replicated/erasure rule 120 | ''' 121 | 122 | name = module.params.get('name') 123 | rule_type = module.params.get('rule_type') 124 | bucket_root = module.params.get('bucket_root') 125 | bucket_type = module.params.get('bucket_type') 126 | device_class = module.params.get('device_class') 127 | profile = module.params.get('profile') 128 | 129 | if rule_type == 'replicated': 130 | args = ['create-replicated', name, bucket_root, bucket_type] 131 | if device_class: 132 | args.append(device_class) 133 | else: 134 | args = ['create-erasure', name] 135 | if profile: 136 | args.append(profile) 137 | 138 | cmd = generate_ceph_cmd(['osd', 'crush', 'rule'], 139 | args) 140 | 141 | return cmd 142 | 143 | 144 | def get_rule(module, container_image=None): 145 | ''' 146 | Get existing crush rule 147 | ''' 148 | 149 | name = module.params.get('name') 150 | 151 | args = ['dump', name, '--format=json'] 152 | 153 | cmd = generate_ceph_cmd(['osd', 'crush', 'rule'], 154 | args) 155 | 156 | return cmd 157 | 158 | 159 | def remove_rule(module, container_image=None): 160 | ''' 161 | Remove a crush rule 162 | ''' 163 | 164 | name = module.params.get('name') 165 | 166 | args = ['rm', name] 167 | 168 | cmd = generate_ceph_cmd(['osd', 'crush', 'rule'], 169 | args) 170 | 171 | return cmd 172 | 173 | 174 | def main(): 175 | module = AnsibleModule( 176 | argument_spec=dict( 177 | name=dict(type='str', required=True), 178 | state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501 179 | rule_type=dict(type='str', required=False, choices=['replicated', 'erasure']), # noqa: E501 180 | bucket_root=dict(type='str', required=False), 181 | bucket_type=dict(type='str', required=False, choices=['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod', # noqa: E501 182 | 'room', 'datacenter', 'zone', 'region', 'root']), # noqa: E501 183 | device_class=dict(type='str', required=False), 184 | profile=dict(type='str', required=False) 185 | ), 186 | supports_check_mode=True, 187 | required_if=[ 188 | ('state', 'present', ['rule_type']), 189 | ('rule_type', 'replicated', ['bucket_root', 'bucket_type']), 190 | ('rule_type', 'erasure', ['profile']) 191 | ] 192 | ) 193 | 194 | # Gather module parameters in variables 195 | name = module.params.get('name') 196 | state = module.params.get('state') 197 | rule_type = module.params.get('rule_type') 198 | 199 | if module.check_mode: 200 | module.exit_json( 201 | changed=False, 202 | stdout='', 203 | stderr='', 204 | rc=0, 205 | start='', 206 | end='', 207 | 
delta='', 208 | ) 209 | 210 | startd = datetime.datetime.now() 211 | changed = False 212 | 213 | if state == "present": 214 | rc, cmd, out, err = exec_command(module, get_rule(module)) # noqa: E501 215 | if rc != 0: 216 | rc, cmd, out, err = exec_command(module, create_rule(module)) # noqa: E501 217 | changed = True 218 | else: 219 | rule = json.loads(out) 220 | if (rule['type'] == 1 and rule_type == 'erasure') or (rule['type'] == 3 and rule_type == 'replicated'): # noqa: E501 221 | module.fail_json(msg="Can not convert crush rule {0} to {1}".format(str(name), str(rule_type)), changed=False, rc=1) # noqa: E501 222 | 223 | elif state == "absent": 224 | rc, cmd, out, err = exec_command(module, get_rule(module)) # noqa: E501 225 | if rc == 0: 226 | rc, cmd, out, err = exec_command(module, remove_rule(module)) # noqa: E501 227 | changed = True 228 | else: 229 | rc = 0 230 | out = "Crush Rule {0} doesn't exist".format(name) 231 | 232 | elif state == "info": 233 | rc, cmd, out, err = exec_command(module, get_rule(module)) # noqa: E501 234 | 235 | exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 236 | 237 | 238 | if __name__ == '__main__': 239 | main() 240 | -------------------------------------------------------------------------------- /plugins/modules/cephadm_ec_profile.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | # Copyright 2020, Red Hat, Inc. 4 | # Copyright 2021, StackHPC, Ltd. 5 | # NOTE: Files adapted from github.com/ceph/ceph-ansible 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | from __future__ import absolute_import, division, print_function 20 | __metaclass__ = type 21 | 22 | 23 | DOCUMENTATION = ''' 24 | --- 25 | module: cephadm_ec_profile 26 | 27 | short_description: Manage Ceph Erasure Code profile 28 | 29 | version_added: "1.4.0" 30 | 31 | description: 32 | - Manage Ceph Erasure Code profile 33 | options: 34 | name: 35 | description: 36 | - name of the profile. 37 | required: true 38 | type: str 39 | state: 40 | description: 41 | If 'present' is used, the module creates a profile. 42 | If 'absent' is used, the module will delete the profile. 43 | required: false 44 | choices: ['present', 'absent'] 45 | default: present 46 | type: str 47 | stripe_unit: 48 | description: 49 | - The amount of data in a data chunk, per stripe. 50 | required: false 51 | type: str 52 | k: 53 | description: 54 | - Number of data-chunks the object will be split in 55 | required: false 56 | type: str 57 | m: 58 | description: 59 | - Compute coding chunks for each object and store them on different 60 | OSDs. 61 | required: false 62 | type: str 63 | plugin: 64 | description: 65 | - Use the erasure code plugin to compute coding chunks and recover 66 | missing chunks. 67 | required: false 68 | type: str 69 | directory: 70 | description: 71 | - Set the directory name from which the erasure code plugin is 72 | loaded. 
73 | required: false
74 | type: str
75 | crush_device_class:
76 | description:
77 | - Restrict placement to devices of a specific class (hdd/ssd)
78 | required: false
79 | type: str
80 | crush_failure_domain:
81 | description:
82 | - Set the failure domain for the CRUSH rule (e.g., 'rack', 'host', 'osd')
83 | required: false
84 | type: str
85 |
86 | author:
87 | - Guillaume Abrioux
88 | - Michal Nasiadka
89 | '''
90 |
91 | EXAMPLES = '''
92 | - name: create an erasure code profile
93 | cephadm_ec_profile:
94 | name: foo
95 | k: 4
96 | m: 2
97 |
98 | - name: delete an erasure code profile
99 | cephadm_ec_profile:
100 | name: foo
101 | state: absent
102 |
103 | - name: create an erasure code profile with custom failure domain
104 | cephadm_ec_profile:
105 | name: foo-osd
106 | k: 4
107 | m: 2
108 | crush_failure_domain: osd
109 | '''
110 |
111 | from ansible.module_utils.basic import AnsibleModule
112 | from ansible_collections.stackhpc.cephadm.plugins.module_utils.cephadm_common \
113 | import generate_ceph_cmd, exec_command, exit_module
114 |
115 | import datetime
116 | import json
117 |
118 |
119 | RETURN = '''# '''
120 |
121 |
122 | def get_profile(module, name):
123 | '''
124 | Get existing profile
125 | '''
126 |
127 | args = ['get', name, '--format=json']
128 |
129 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'erasure-code-profile'],
130 | args=args)
131 |
132 | return cmd
133 |
134 |
135 | def create_profile(module, name, k, m, stripe_unit, crush_device_class, crush_failure_domain, directory, plugin, force=False): # noqa: E501
136 | '''
137 | Create a profile
138 | '''
139 |
140 | args = ['set', name, 'k={0}'.format(k), 'm={0}'.format(m)]
141 | if stripe_unit:
142 | args.append('stripe_unit={0}'.format(stripe_unit))
143 | if crush_device_class:
144 | args.append('crush-device-class={0}'.format(crush_device_class))
145 | if crush_failure_domain:
146 | args.append('crush-failure-domain={0}'.format(crush_failure_domain))
147 | if directory:
148 | args.append('directory={0}'.format(directory))
149 | if plugin:
150 | args.append('plugin={0}'.format(plugin))
151 | if force:
152 | args.append('--force')
153 |
154 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'erasure-code-profile'],
155 | args=args)
156 |
157 | return cmd
158 |
159 |
160 | def delete_profile(module, name):
161 | '''
162 | Delete a profile
163 | '''
164 |
165 | args = ['rm', name]
166 |
167 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'erasure-code-profile'],
168 | args=args)
169 |
170 | return cmd
171 |
172 |
173 | def run_module():
174 | module_args = dict(
175 | name=dict(type='str', required=True),
176 | state=dict(type='str', required=False,
177 | choices=['present', 'absent'], default='present'),
178 | stripe_unit=dict(type='str', required=False),
179 | k=dict(type='str', required=False),
180 | m=dict(type='str', required=False),
181 | crush_device_class=dict(type='str', required=False),
182 | crush_failure_domain=dict(type='str', required=False),
183 | directory=dict(type='str', required=False),
184 | plugin=dict(type='str', required=False),
185 | )
186 |
187 | module = AnsibleModule(
188 | argument_spec=module_args,
189 | supports_check_mode=True,
190 | required_if=[['state', 'present', ['k', 'm']]],
191 | )
192 |
193 | # Gather module parameters in variables
194 | name = module.params.get('name')
195 | state = module.params.get('state')
196 | stripe_unit = module.params.get('stripe_unit')
197 | k = module.params.get('k')
198 | m = module.params.get('m')
199 | crush_device_class = module.params.get('crush_device_class')
200 |
crush_failure_domain = module.params.get('crush_failure_domain') 201 | directory = module.params.get('directory') 202 | plugin = module.params.get('plugin') 203 | 204 | if module.check_mode: 205 | module.exit_json( 206 | changed=False, 207 | stdout='', 208 | stderr='', 209 | rc=0, 210 | start='', 211 | end='', 212 | delta='', 213 | ) 214 | 215 | startd = datetime.datetime.now() 216 | changed = False 217 | 218 | if state == "present": 219 | rc, cmd, out, err = exec_command(module, get_profile(module, name)) # noqa: E501 220 | if rc == 0: 221 | # the profile already exists, let's check whether we have to 222 | # update it 223 | current_profile = json.loads(out) 224 | if current_profile['k'] != k or \ 225 | current_profile['m'] != m or \ 226 | current_profile.get('stripe_unit', stripe_unit) != stripe_unit or \ 227 | current_profile.get('crush-device-class', crush_device_class) != crush_device_class or \ 228 | current_profile.get('crush-failure-domain', crush_failure_domain) != crush_failure_domain or \ 229 | current_profile.get('directory', directory) != directory or \ 230 | current_profile.get('plugin', plugin) != plugin: # noqa: E501 231 | rc, cmd, out, err = exec_command(module, 232 | create_profile(module, 233 | name, 234 | k, 235 | m, 236 | stripe_unit, 237 | crush_device_class, # noqa: E501 238 | crush_failure_domain, 239 | directory, 240 | plugin, 241 | force=True)) # noqa: E501 242 | changed = True 243 | else: 244 | # the profile doesn't exist, it has to be created 245 | rc, cmd, out, err = exec_command(module, create_profile(module, 246 | name, 247 | k, 248 | m, 249 | stripe_unit, # noqa: E501 250 | crush_device_class, # noqa: E501 251 | crush_failure_domain, 252 | directory, 253 | plugin)) 254 | if rc == 0: 255 | changed = True 256 | 257 | elif state == "absent": 258 | rc, cmd, out, err = exec_command(module, delete_profile(module, name)) # noqa: E501 259 | if not err: 260 | out = 'Profile {0} removed.'.format(name) 261 | changed = True 262 | else: 263 | rc = 0 264 | out = "Skipping, the profile {0} doesn't exist".format(name) 265 | 266 | exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 267 | 268 | 269 | def main(): 270 | run_module() 271 | 272 | 273 | if __name__ == '__main__': 274 | main() 275 | -------------------------------------------------------------------------------- /plugins/modules/cephadm_key.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | # Copyright 2018, Red Hat, Inc. 4 | # Copyright 2021, StackHPC, Ltd. 5 | # NOTE: Files adapted from github.com/ceph/ceph-ansible 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 
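#
# NOTE: as with the other modules in this collection, the 'ceph auth ...'
# command lines below are built by generate_ceph_cmd() from module_utils,
# which wraps them in 'cephadm --timeout 60 shell -- ceph ...' so they run
# inside the cephadm shell container on the target host.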
18 | 19 | from __future__ import absolute_import, division, print_function 20 | __metaclass__ = type 21 | 22 | DOCUMENTATION = r''' 23 | --- 24 | module: cephadm_key 25 | 26 | author: Sebastien Han 27 | Michal Nasiadka 28 | version_added: "1.4.0" 29 | short_description: Manage Cephx key(s) 30 | 31 | description: 32 | - Manage CephX creation, deletion and updates. 33 | It can also list and get information about keyring(s). 34 | 35 | options: 36 | name: 37 | description: 38 | - name of the CephX key 39 | required: false 40 | type: str 41 | state: 42 | description: 43 | - If 'present' is used, the module ensures a keyring exists 44 | with the associated capabilities. 45 | If 'absent' is used, the module will simply delete the keyring. 46 | If 'list' is used, the module will list all the keys and will 47 | return a json output. 48 | If 'info' is used, the module will return in a json format the 49 | description of a given keyring. 50 | required: false 51 | choices: ['present', 'absent', 'list', 'info'] 52 | default: present 53 | type: str 54 | caps: 55 | description: 56 | - CephX key capabilities 57 | default: {} 58 | required: false 59 | type: dict 60 | output_format: 61 | description: 62 | - The key output format when retrieving the information of an 63 | entity. 64 | required: false 65 | choices: ['json', 'plain', 'xml', 'yaml'] 66 | default: json 67 | type: str 68 | ''' 69 | 70 | EXAMPLES = ''' 71 | - name: create cephx key 72 | cephadm_key: 73 | name: "{{ item.name }}" 74 | state: present 75 | caps: "{{ item.caps }}" 76 | with_items: "{{ keys_to_create }}" 77 | 78 | - name: delete cephx key 79 | cephadm_key: 80 | name: "my_key" 81 | state: absent 82 | 83 | - name: info cephx key 84 | cephadm_key: 85 | name: "my_key" 86 | state: info 87 | 88 | - name: info cephx admin key (plain) 89 | cephadm_key: 90 | name: client.admin 91 | output_format: plain 92 | state: info 93 | register: client_admin_key 94 | 95 | - name: list cephx keys 96 | cephadm_key: 97 | state: list 98 | ''' 99 | 100 | RETURN = r''' # ''' 101 | 102 | from ansible.module_utils.basic import AnsibleModule 103 | from ansible_collections.stackhpc.cephadm.plugins.module_utils.cephadm_common \ 104 | import fatal, generate_ceph_cmd 105 | import datetime 106 | import json 107 | 108 | 109 | def str_to_bool(val): 110 | try: 111 | val = val.lower() 112 | except AttributeError: 113 | val = str(val).lower() 114 | if val == 'true': 115 | return True 116 | elif val == 'false': 117 | return False 118 | else: 119 | raise ValueError("Invalid input value: %s" % val) 120 | 121 | 122 | def generate_caps(caps): 123 | ''' 124 | Generate CephX capabilities list 125 | ''' 126 | 127 | caps_cli = [] 128 | 129 | for k, v in caps.items(): 130 | caps_cli.extend([k, v]) 131 | 132 | return caps_cli 133 | 134 | 135 | def create_key(name, caps): # noqa: E501 136 | ''' 137 | Create a CephX key 138 | ''' 139 | cmd = [] 140 | 141 | args = [ 142 | 'get-or-create', 143 | name 144 | ] 145 | 146 | args.extend(generate_caps(caps)) 147 | cmd.append(generate_ceph_cmd(sub_cmd=['auth'], 148 | args=args)) 149 | 150 | return cmd 151 | 152 | 153 | def update_key(name, caps): 154 | ''' 155 | Update the caps of a CephX key 156 | ''' 157 | 158 | cmd = [] 159 | 160 | args = [ 161 | 'caps', 162 | name, 163 | ] 164 | args.extend(generate_caps(caps)) 165 | cmd.append(generate_ceph_cmd(sub_cmd=['auth'], 166 | args=args)) 167 | 168 | return cmd 169 | 170 | 171 | def delete_key(name): 172 | ''' 173 | Delete a CephX key 174 | ''' 175 | 176 | cmd = [] 177 | 178 | args = [ 179 | 'del', 
180 | name, 181 | ] 182 | 183 | cmd.append(generate_ceph_cmd(sub_cmd=['auth'], 184 | args=args)) 185 | 186 | return cmd 187 | 188 | 189 | def get_key(name, dest): 190 | ''' 191 | Get a CephX key (write on the filesystem) 192 | ''' 193 | 194 | cmd = [] 195 | 196 | args = [ 197 | 'get', 198 | name, 199 | '-o', 200 | dest, 201 | ] 202 | 203 | cmd.append(generate_ceph_cmd(sub_cmd=['auth'], 204 | args=args)) 205 | 206 | return cmd 207 | 208 | 209 | def info_key(name, output_format): 210 | ''' 211 | Get information about a CephX key 212 | ''' 213 | 214 | cmd_list = [] 215 | 216 | args = [ 217 | 'get', 218 | name, 219 | '-f', 220 | output_format, 221 | ] 222 | 223 | cmd_list.append(generate_ceph_cmd(sub_cmd=['auth'], 224 | args=args)) 225 | 226 | return cmd_list 227 | 228 | 229 | def list_keys(): 230 | ''' 231 | List all CephX keys 232 | ''' 233 | 234 | cmd = [] 235 | 236 | args = [ 237 | 'ls', 238 | '-f', 239 | 'json', 240 | ] 241 | 242 | cmd.append(generate_ceph_cmd(sub_cmd=['auth'], 243 | args=args)) 244 | 245 | return cmd 246 | 247 | 248 | def exec_commands(module, cmd_list): 249 | ''' 250 | Execute command(s) 251 | ''' 252 | 253 | for cmd in cmd_list: 254 | rc, out, err = module.run_command(cmd) 255 | if rc != 0: 256 | return rc, cmd, out, err 257 | 258 | return rc, cmd, out, err 259 | 260 | 261 | def run_module(): 262 | module_args = dict( 263 | name=dict(type='str', required=False), 264 | state=dict(type='str', required=False, default='present', choices=['present', 'absent', # noqa: E501 265 | 'list', 'info']), # noqa: E501 266 | caps=dict(type='dict', required=False, default={}), 267 | output_format=dict(type='str', required=False, default='json', choices=['json', 'plain', 'xml', 'yaml']) # noqa: E501 268 | ) 269 | 270 | module = AnsibleModule( 271 | argument_spec=module_args, 272 | supports_check_mode=True 273 | ) 274 | 275 | # Gather module parameters in variables 276 | state = module.params['state'] 277 | name = module.params.get('name') 278 | caps = module.params.get('caps') 279 | output_format = module.params.get('output_format') 280 | 281 | changed = False 282 | 283 | result = dict( 284 | changed=changed, 285 | stdout='', 286 | stderr='', 287 | rc=0, 288 | start='', 289 | end='', 290 | delta='', 291 | ) 292 | 293 | if module.check_mode: 294 | module.exit_json(**result) 295 | 296 | startd = datetime.datetime.now() 297 | 298 | # Test if the key exists, if it does we skip its creation 299 | # We only want to run this check when a key needs to be added 300 | # There is no guarantee that any cluster is running and we don't need one 301 | _caps = caps 302 | key_exist = 1 303 | 304 | if state == "present": 305 | _info_key = [] 306 | rc, cmd, out, err = exec_commands( 307 | module, info_key(name, output_format)) # noqa: E501 308 | key_exist = rc 309 | if not caps and key_exist != 0: 310 | fatal("Capabilities must be provided when state is 'present'", module) # noqa: E501 311 | if key_exist == 0: 312 | _info_key = json.loads(out) 313 | if not caps: 314 | caps = _info_key[0]['caps'] 315 | _caps = _info_key[0]['caps'] 316 | if caps == _caps: 317 | result["stdout"] = "{0} already exists and doesn't need to be updated.".format(name) # noqa: E501 318 | result["rc"] = 0 319 | module.exit_json(**result) 320 | else: 321 | rc, cmd, out, err = exec_commands(module, update_key(name, caps)) # noqa: E501 322 | if rc != 0: 323 | result["msg"] = "Couldn't update caps for {0}".format(name) 324 | result["stderr"] = err 325 | module.fail_json(**result) 326 | changed = True 327 | 328 | else: 329 | rc, cmd, 
out, err = exec_commands(module, create_key(name, caps)) # noqa: E501 330 | if rc != 0: 331 | result["msg"] = "Couldn't create {0}".format(name) 332 | result["stderr"] = err 333 | module.fail_json(**result) 334 | changed = True 335 | 336 | elif state == "absent": 337 | rc, cmd, out, err = exec_commands( 338 | module, info_key(name, output_format)) # noqa: E501 339 | key_exist = rc 340 | if key_exist == 0: 341 | rc, cmd, out, err = exec_commands( 342 | module, delete_key(name)) # noqa: E501 343 | if rc == 0: 344 | changed = True 345 | else: 346 | rc = 0 347 | 348 | elif state == "info": 349 | rc, cmd, out, err = exec_commands( 350 | module, info_key(name, output_format)) # noqa: E501 351 | 352 | elif state == "list": 353 | rc, cmd, out, err = exec_commands( 354 | module, list_keys()) 355 | 356 | endd = datetime.datetime.now() 357 | delta = endd - startd 358 | 359 | result = dict( 360 | cmd=cmd, 361 | start=str(startd), 362 | end=str(endd), 363 | delta=str(delta), 364 | rc=rc, 365 | stdout=out.rstrip("\r\n"), 366 | stderr=err.rstrip("\r\n"), 367 | name=name, 368 | changed=changed, 369 | ) 370 | 371 | if rc != 0: 372 | module.fail_json(msg='non-zero return code', **result) 373 | 374 | module.exit_json(**result) 375 | 376 | 377 | def main(): 378 | run_module() 379 | 380 | 381 | if __name__ == '__main__': 382 | main() 383 | -------------------------------------------------------------------------------- /plugins/modules/cephadm_pool.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | # Copyright 2020, Red Hat, Inc. 4 | # Copyright 2021, StackHPC, Ltd. 5 | # NOTE: Files adapted from github.com/ceph/ceph-ansible 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | from __future__ import absolute_import, division, print_function 20 | __metaclass__ = type 21 | 22 | DOCUMENTATION = r''' 23 | module: cephadm_pool 24 | author: 25 | - Guillaume Abrioux 26 | - Michal Nasiadka 27 | short_description: Manage Ceph Pools 28 | version_added: "1.4.0" 29 | description: 30 | - Manage Ceph pool(s) creation, deletion and updates. 31 | options: 32 | name: 33 | description: 34 | - name of the Ceph pool 35 | required: true 36 | type: str 37 | state: 38 | description: 39 | - If 'present' is used, the module creates a pool if it doesn't 40 | exist or update it if it already exists. 41 | If 'absent' is used, the module will simply delete the pool. 42 | If 'list' is used, the module will return all details about the 43 | existing pools. (json formatted). 44 | required: false 45 | choices: ['present', 'absent', 'list'] 46 | default: present 47 | type: str 48 | details: 49 | description: 50 | - show details when state is list 51 | required: false 52 | type: bool 53 | size: 54 | description: 55 | - set the replica size of the pool. 56 | required: false 57 | type: str 58 | min_size: 59 | description: 60 | - set the min_size parameter of the pool. 
61 | required: false
62 | default: default to `osd_pool_default_min_size` (ceph)
63 | type: str
64 | pg_num:
65 | description:
66 | - set the pg_num of the pool.
67 | required: false
68 | default: default to `osd_pool_default_pg_num` (ceph)
69 | type: str
70 | pgp_num:
71 | description:
72 | - set the pgp_num of the pool.
73 | required: false
74 | default: default to `osd_pool_default_pgp_num` (ceph)
75 | type: str
76 | pg_autoscale_mode:
77 | description:
78 | - set the pg autoscaler on the pool.
79 | required: false
80 | default: 'on'
81 | type: str
82 | target_size_ratio:
83 | description:
84 | - set the target_size_ratio on the pool
85 | required: false
86 | type: str
87 | pool_type:
88 | description:
89 | - set the pool type, either 'replicated' or 'erasure'
90 | required: false
91 | default: replicated
92 | choices: ['replicated', 'erasure']
93 | type: str
94 | erasure_profile:
95 | description:
96 | - When pool_type = 'erasure', set the erasure profile of the pool
97 | required: false
98 | default: default
99 | type: str
100 | rule_name:
101 | description:
102 | - Set the crush rule name assigned to the pool
103 | required: false
104 | type: str
105 | expected_num_objects:
106 | description:
107 | - Set the expected_num_objects parameter of the pool.
108 | required: false
109 | default: "0"
110 | type: str
111 | application:
112 | description:
113 | - Set the pool application on the pool.
114 | required: false
115 | default: None
116 | type: str
117 | allow_ec_overwrites:
118 | description:
119 | - Set the allow_ec_overwrites parameter of the pool.
120 | required: false
121 | default: false
122 | type: bool
123 | '''
124 |
125 | EXAMPLES = r'''
126 | - name: Create Ceph pools
127 | hosts: all
128 | become: true
129 | tasks:
130 | - name: Create a pool
131 | cephadm_pool:
132 | name: "{{ item.name }}"
133 | state: present
134 | size: "{{ item.size }}"
135 | application: "{{ item.application }}"
136 | pool_type: "{{ item.pool_type }}"
137 | pg_autoscale_mode: "{{ item.pg_autoscale_mode }}"
138 | with_items: "{{ pools }}"
139 | '''
140 |
141 | RETURN = r'''# '''
142 |
143 | from ansible.module_utils.basic import AnsibleModule
144 | from ansible_collections.stackhpc.cephadm.plugins.module_utils.cephadm_common \
145 | import generate_ceph_cmd, exec_command, exit_module
146 |
147 | import datetime
148 | import json
149 |
150 |
151 | def check_pool_exist(name,
152 | output_format='json'):
153 | '''
154 | Check if a given pool exists
155 | '''
156 |
157 | args = ['stats', name, '-f', output_format]
158 |
159 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
160 | args=args)
161 |
162 | return cmd
163 |
164 |
165 | def get_application_pool(name,
166 | output_format='json'):
167 | '''
168 | Get application type enabled on a given pool
169 | '''
170 |
171 | args = ['application', 'get', name, '-f', output_format]
172 |
173 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
174 | args=args)
175 |
176 | return cmd
177 |
178 |
179 | def enable_application_pool(name,
180 | application):
181 | '''
182 | Enable application on a given pool
183 | '''
184 |
185 | args = ['application', 'enable', name, application]
186 |
187 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
188 | args=args)
189 |
190 | return cmd
191 |
192 |
193 | def disable_application_pool(name,
194 | application):
195 | '''
196 | Disable application on a given pool
197 | '''
198 |
199 | args = ['application', 'disable', name,
200 | application, '--yes-i-really-mean-it']
201 |
202 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
203
| args=args) 204 | 205 | return cmd 206 | 207 | 208 | def get_pool_ec_overwrites(name, output_format='json'): 209 | ''' 210 | Get EC overwrites on a given pool 211 | ''' 212 | 213 | args = ['get', name, 'allow_ec_overwrites', 214 | '-f', output_format] 215 | 216 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 217 | args=args) 218 | 219 | return cmd 220 | 221 | 222 | def enable_ec_overwrites(name): 223 | ''' 224 | Enable EC overwrites on a given pool 225 | ''' 226 | 227 | args = ['set', name, 'allow_ec_overwrites', 228 | 'true'] 229 | 230 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 231 | args=args) 232 | 233 | return cmd 234 | 235 | 236 | def disable_ec_overwrites(name): 237 | ''' 238 | Disable EC overwrites on a given pool 239 | ''' 240 | 241 | args = ['set', name, 'allow_ec_overwrites', 242 | 'false'] 243 | 244 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 245 | args=args) 246 | 247 | return cmd 248 | 249 | 250 | def get_pool_details(module, 251 | name, 252 | output_format='json'): 253 | ''' 254 | Get details about a given pool 255 | ''' 256 | 257 | args = ['ls', 'detail', '-f', output_format] 258 | 259 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 260 | args=args) 261 | 262 | rc, cmd, out, err = exec_command(module, cmd) 263 | 264 | if rc == 0: 265 | out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0] 266 | 267 | _rc, _cmd, application_pool, _err = exec_command(module, 268 | get_application_pool(name) 269 | ) 270 | 271 | # This is a trick because "target_size_ratio" isn't present at the same 272 | # level in the dict 273 | # ie: 274 | # { 275 | # 'pg_num': 8, 276 | # 'pgp_num': 8, 277 | # 'pg_autoscale_mode': 'on', 278 | # 'options': { 279 | # 'target_size_ratio': 0.1 280 | # } 281 | # } 282 | # If 'target_size_ratio' is present in 'options', we set it, this way we 283 | # end up with a dict containing all needed keys at the same level. 284 | if 'target_size_ratio' in out['options'].keys(): 285 | out['target_size_ratio'] = out['options']['target_size_ratio'] 286 | else: 287 | out['target_size_ratio'] = None 288 | 289 | application = list(json.loads(application_pool.strip()).keys()) 290 | 291 | if len(application) == 0: 292 | out['application'] = '' 293 | else: 294 | out['application'] = application[0] 295 | 296 | return rc, cmd, out, err 297 | 298 | 299 | def compare_pool_config(user_pool_config, running_pool_details): 300 | ''' 301 | Compare user input config pool details with current running pool details 302 | ''' 303 | 304 | delta = {} 305 | filter_keys = ['pg_num', 'pg_placement_num', 'size', 306 | 'pg_autoscale_mode', 'target_size_ratio'] 307 | for key in filter_keys: 308 | if (str(running_pool_details[key]) != user_pool_config[key]['value'] and # noqa: E501 309 | user_pool_config[key]['value']): 310 | delta[key] = user_pool_config[key] 311 | 312 | if (running_pool_details['application'] != 313 | user_pool_config['application']['value'] and 314 | user_pool_config['application']['value']): 315 | delta['application'] = {} 316 | delta['application']['new_application'] = user_pool_config['application']['value'] # noqa: E501 317 | # to be improved (for update_pools()...) 
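# Mirroring the new application under the generic 'value' key lets
# update_pool() format its "<pool> has been updated" report line for
# 'application' in the same way as for every other key in the delta.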
318 | delta['application']['value'] = delta['application']['new_application'] 319 | delta['application']['old_application'] = running_pool_details['application'] # noqa: E501 320 | 321 | return delta 322 | 323 | 324 | def list_pools(details, 325 | output_format='json'): 326 | ''' 327 | List existing pools 328 | ''' 329 | 330 | args = ['ls'] 331 | 332 | if details: 333 | args.append('detail') 334 | 335 | args.extend(['-f', output_format]) 336 | 337 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 338 | args=args) 339 | 340 | return cmd 341 | 342 | 343 | def create_pool(name, 344 | user_pool_config): 345 | ''' 346 | Create a new pool 347 | ''' 348 | 349 | args = ['create', user_pool_config['pool_name']['value'], 350 | user_pool_config['type']['value']] 351 | 352 | if user_pool_config['pg_autoscale_mode']['value'] != 'on': 353 | args.extend(['--pg_num', 354 | user_pool_config['pg_num']['value'], 355 | '--pgp_num', 356 | user_pool_config['pgp_num']['value']]) 357 | elif user_pool_config['target_size_ratio']['value']: 358 | args.extend(['--target_size_ratio', 359 | user_pool_config['target_size_ratio']['value']]) 360 | 361 | if user_pool_config['type']['value'] == 'replicated': 362 | args.extend([user_pool_config['crush_rule']['value'], 363 | '--expected_num_objects', 364 | user_pool_config['expected_num_objects']['value'], 365 | '--autoscale-mode', 366 | user_pool_config['pg_autoscale_mode']['value']]) 367 | 368 | if (user_pool_config['size']['value'] and 369 | user_pool_config['type']['value'] == "replicated"): 370 | args.extend(['--size', user_pool_config['size']['value']]) 371 | 372 | elif user_pool_config['type']['value'] == 'erasure': 373 | args.extend([user_pool_config['erasure_profile']['value']]) 374 | 375 | if user_pool_config['crush_rule']['value']: 376 | args.extend([user_pool_config['crush_rule']['value']]) 377 | 378 | args.extend(['--expected_num_objects', 379 | user_pool_config['expected_num_objects']['value'], 380 | '--autoscale-mode', 381 | user_pool_config['pg_autoscale_mode']['value']]) 382 | 383 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 384 | args=args) 385 | 386 | return cmd 387 | 388 | 389 | def remove_pool(name): 390 | ''' 391 | Remove a pool 392 | ''' 393 | 394 | args = ['rm', name, name, '--yes-i-really-really-mean-it'] 395 | 396 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 397 | args=args) 398 | 399 | return cmd 400 | 401 | 402 | def update_pool(module, name, delta): 403 | ''' 404 | Update an existing pool 405 | ''' 406 | 407 | report = "" 408 | 409 | for key in delta.keys(): 410 | if key != 'application': 411 | args = ['set', 412 | name, 413 | delta[key]['cli_set_opt'], 414 | delta[key]['value']] 415 | 416 | cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'], 417 | args=args) 418 | 419 | rc, cmd, out, err = exec_command(module, cmd) 420 | if rc != 0: 421 | return rc, cmd, out, err 422 | 423 | else: 424 | rc, cmd, out, err = exec_command(module, disable_application_pool(name, delta['application']['old_application'])) # noqa: E501 425 | if rc != 0: 426 | return rc, cmd, out, err 427 | 428 | rc, cmd, out, err = exec_command(module, enable_application_pool(name, delta['application']['new_application'])) # noqa: E501 429 | if rc != 0: 430 | return rc, cmd, out, err 431 | 432 | report = report + "\n" + "{0} has been updated: {1} is now {2}".format(name, key, delta[key]['value']) # noqa: E501 433 | 434 | out = report 435 | return rc, cmd, out, err 436 | 437 | 438 | def run_module(): 439 | module_args = dict( 440 | name=dict(type='str', required=True), 441 | 
state=dict(type='str', required=False, default='present', 442 | choices=['present', 'absent', 'list']), 443 | details=dict(type='bool', required=False, default=False), 444 | size=dict(type='str', required=False), 445 | min_size=dict(type='str', required=False), 446 | pg_num=dict(type='str', required=False), 447 | pgp_num=dict(type='str', required=False), 448 | pg_autoscale_mode=dict(type='str', required=False, default='on'), 449 | target_size_ratio=dict(type='str', required=False, default=None), 450 | pool_type=dict(type='str', required=False, default='replicated', 451 | choices=['replicated', 'erasure']), 452 | erasure_profile=dict(type='str', required=False, default='default'), 453 | rule_name=dict(type='str', required=False, default=None), 454 | expected_num_objects=dict(type='str', required=False, default="0"), 455 | application=dict(type='str', required=False, default=None), 456 | allow_ec_overwrites=dict(type='bool', required=False, default=False) 457 | ) 458 | 459 | module = AnsibleModule( 460 | argument_spec=module_args, 461 | supports_check_mode=True 462 | ) 463 | 464 | # Gather module parameters in variables 465 | name = module.params.get('name') 466 | state = module.params.get('state') 467 | details = module.params.get('details') 468 | size = module.params.get('size') 469 | min_size = module.params.get('min_size') 470 | pg_num = module.params.get('pg_num') 471 | pgp_num = module.params.get('pgp_num') 472 | pg_autoscale_mode = module.params.get('pg_autoscale_mode') 473 | target_size_ratio = module.params.get('target_size_ratio') 474 | application = module.params.get('application') 475 | allow_ec_overwrites = module.params.get('allow_ec_overwrites') 476 | 477 | if (module.params.get('pg_autoscale_mode').lower() in 478 | ['true', 'on', 'yes']): 479 | pg_autoscale_mode = 'on' 480 | elif (module.params.get('pg_autoscale_mode').lower() in 481 | ['false', 'off', 'no']): 482 | pg_autoscale_mode = 'off' 483 | else: 484 | pg_autoscale_mode = 'warn' 485 | 486 | if module.params.get('pool_type') == '1': 487 | pool_type = 'replicated' 488 | elif module.params.get('pool_type') == '3': 489 | pool_type = 'erasure' 490 | else: 491 | pool_type = module.params.get('pool_type') 492 | 493 | if not module.params.get('rule_name'): 494 | rule_name = 'replicated_rule' if pool_type == 'replicated' else None 495 | else: 496 | rule_name = module.params.get('rule_name') 497 | 498 | erasure_profile = module.params.get('erasure_profile') 499 | expected_num_objects = module.params.get('expected_num_objects') 500 | user_pool_config = { 501 | 'pool_name': {'value': name}, 502 | 'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'}, 503 | 'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'}, 504 | 'pg_autoscale_mode': {'value': pg_autoscale_mode, 505 | 'cli_set_opt': 'pg_autoscale_mode'}, 506 | 'target_size_ratio': {'value': target_size_ratio, 507 | 'cli_set_opt': 'target_size_ratio'}, 508 | 'application': {'value': application}, 509 | 'type': {'value': pool_type}, 510 | 'erasure_profile': {'value': erasure_profile}, 511 | 'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'}, 512 | 'expected_num_objects': {'value': expected_num_objects}, 513 | 'size': {'value': size, 'cli_set_opt': 'size'}, 514 | 'min_size': {'value': min_size}, 515 | 'allow_ec_overwrites': {'value': allow_ec_overwrites} 516 | } 517 | 518 | if module.check_mode: 519 | module.exit_json( 520 | changed=False, 521 | stdout='', 522 | stderr='', 523 | rc=0, 524 | start='', 525 | end='', 526 | delta='', 527 | ) 528 | 529 | startd = 
datetime.datetime.now()
530 | changed = False
531 | 
532 | if state == "present":
533 | rc, cmd, out, err = exec_command(module,
534 | check_pool_exist(name))
535 | if rc == 0:
536 | running_pool_details = get_pool_details(module,
537 | name)
538 | user_pool_config['pg_placement_num'] = {'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num'}  # noqa: E501
539 | delta = compare_pool_config(user_pool_config,
540 | running_pool_details[2])
541 | 
542 | if user_pool_config['type']['value'] == 'erasure':
543 | rc, cmd, ec_overwrites, err = exec_command(module, get_pool_ec_overwrites(name))  # noqa: E501
544 | running_pool_ec_overwrites = json.loads(ec_overwrites.strip()).get('allow_ec_overwrites')  # noqa: E501
545 | if running_pool_ec_overwrites != user_pool_config['allow_ec_overwrites']['value']:  # noqa: E501
546 | if user_pool_config['allow_ec_overwrites']['value']:
547 | rc, cmd, out, err = exec_command(module, enable_ec_overwrites(name))  # noqa: E501
548 | else:
549 | rc, cmd, out, err = exec_command(module, disable_ec_overwrites(name))  # noqa: E501
550 | if rc == 0:
551 | changed = True
552 | 
553 | if len(delta) > 0:
554 | keys = list(delta.keys())
555 | details = running_pool_details[2]
556 | if details['erasure_code_profile'] and 'size' in keys:
557 | del delta['size']
558 | if details['pg_autoscale_mode'] == 'on':
559 | delta.pop('pg_num', None)
560 | delta.pop('pgp_num', None)
561 | 
562 | if len(delta) == 0:
563 | out = "Skipping pool {0}.\nUpdating 'size' on an erasure-coded pool or 'pg_num'/'pgp_num' on a pg-autoscaled pool is not supported".format(name)  # noqa: E501
564 | else:
565 | rc, cmd, out, err = update_pool(module,
566 | name,
567 | delta)
568 | if rc == 0:
569 | changed = True
570 | 
571 | else:
572 | out = "Pool {0} already exists and there is nothing to update.".format(name)  # noqa: E501
573 | else:
574 | rc, cmd, out, err = exec_command(module,
575 | create_pool(name,
576 | user_pool_config=user_pool_config))  # noqa: E501
577 | if user_pool_config['application']['value']:
578 | rc, _, _, _ = exec_command(module,
579 | enable_application_pool(name,
580 | user_pool_config['application']['value']))  # noqa: E501
581 | if user_pool_config['min_size']['value']:
582 | # not implemented yet
583 | pass
584 | if user_pool_config['allow_ec_overwrites']['value']:
585 | rc, _, _, _ = exec_command(module,
586 | enable_ec_overwrites(name))
587 | 
588 | changed = True
589 | 
590 | elif state == "list":
591 | rc, cmd, out, err = exec_command(module,
592 | list_pools(details))
593 | 
594 | if rc != 0:
595 | out = "Couldn't list pool(s) present on the cluster"
596 | 
597 | elif state == "absent":
598 | rc, cmd, out, err = exec_command(module,
599 | check_pool_exist(name))
600 | if rc == 0:
601 | rc, cmd, out, err = exec_command(module,
602 | remove_pool(name))
603 | changed = True
604 | else:
605 | rc = 0
606 | out = "Skipped, since pool {0} doesn't exist".format(name)
607 | 
608 | exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd,
609 | changed=changed)
610 | 
611 | 
612 | def main():
613 | run_module()
614 | 
615 | 
616 | if __name__ == '__main__':
617 | main()
618 | 
--------------------------------------------------------------------------------
/roles/cephadm/README.md:
--------------------------------------------------------------------------------
1 | # cephadm
2 | 
3 | This role bootstraps and configures Ceph using cephadm.
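A minimal play applying this role might look like the following sketch (the `ceph` group and the interface and network values are illustrative and must match your environment):

```
- name: Deploy Ceph with cephadm
  hosts: ceph
  roles:
    - role: stackhpc.cephadm.cephadm
      cephadm_ceph_release: reef
      cephadm_public_interface: eth0
      cephadm_public_network: 10.66.0.0/24
```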
4 | 
5 | ## Prerequisites
6 | 
7 | ### Host prerequisites
8 | 
9 | * The role assumes that target hosts are reachable over SSH as a user with passwordless sudo configured.
10 | * Either direct Internet access or a private registry with the desired Ceph image accessible to all hosts is required.
11 | 
12 | ### Inventory
13 | 
14 | This role assumes the existence of the following groups:
15 | 
16 | * `ceph`
17 | * `mons`
18 | * `mgrs`
19 | * `osds`
20 | 
21 | Optional groups (these services will be deployed when the group exists):
22 | 
23 | * `rgws`
24 | 
25 | All Ceph hosts must be in the `ceph` group.
26 | 
27 | ## Role variables
28 | 
29 | * General
30 | * `cephadm_ceph_release`: Ceph release to deploy (default: reef)
31 | * `cephadm_container_engine`: Container engine to use, either `docker` or `podman` (default: docker)
32 | * `cephadm_fsid`: FSID to use for the cluster (default: empty - cephadm will generate an FSID)
33 | * `cephadm_recreate`: Whether an existing cluster should be destroyed and recreated (default: false)
34 | * `cephadm_custom_repos`: Boolean: disables configuring the official Ceph YUM/APT repositories - `cephadm_ceph_release` is ignored. Can serve as a workaround when a supported OS distro + Ceph release combination is not available upstream. (default: false)
35 | * `cephadm_package_update`: If enabled - the cephadm package will be updated to the latest version (default: false)
36 | * `cephadm_host_labels`: If set (list format) - these additional labels will be applied to host definitions (default: [] - empty list)
37 | * Bootstrap settings
38 | * `cephadm_bootstrap_host`: The host on which to bootstrap Ceph (default: `groups['mons'][0]`)
39 | * `cephadm_enable_dashboard`: If enabled - the dashboard service on the MGR will be enabled (default: false)
40 | * `cephadm_enable_firewalld`: If enabled - firewalld will be installed and rules will be applied (default: false)
41 | * `cephadm_enable_monitoring`: If enabled - the cephadm monitoring stack will be deployed, i.e. prometheus/node-exporters/grafana (default: false)
42 | * `cephadm_image`: If set - cephadm will use this image
43 | * `cephadm_haproxy_image`: If set - cephadm will use this image for HAProxy in the ingress service
44 | * `cephadm_keepalived_image`: If set - cephadm will use this image for Keepalived in the ingress service
45 | * `cephadm_install_ceph_cli`: If enabled - the ceph CLI will be installed on the hosts (default: false)
46 | * `cephadm_ssh_public_key`: Location where the SSH public key used by cephadm will be saved (default: /etc/ceph/cephadm.pub)
47 | * `cephadm_ssh_private_key`: Location where the SSH private key used by cephadm will be saved (default: /etc/ceph/cephadm.id)
48 | * `cephadm_ssh_user`: Pre-existing user name that should be used for bootstrapping the cluster. The user must have passwordless sudo enabled. Since 1.4.0 (default: `ansible_user`)
49 | * `cephadm_bootstrap_additional_parameters`: Additional arguments to pass to `cephadm bootstrap`
50 | * `cephadm_apt_repo_dist`: Override the default `ansible_distribution_release` for the Debian package repository
51 | * MONs and MGRs
52 | * `cephadm_mon_count`: Number of MONs to deploy (default: equal to the number of hosts in the `mons` Ansible group)
53 | * `cephadm_mgr_count`: Number of MGRs to deploy (default: equal to the number of hosts in the `mgrs` Ansible group)
54 | * OSDs
55 | * `cephadm_osd_devices`: List of /dev/device paths to use (e.g. for multipath devices that can't be targeted with an OSD spec)
56 | Example:
57 | ```
58 | cephadm_osd_devices:
59 | - /dev/sdb
60 | - /dev/sdc
61 | ```
62 | * `cephadm_osd_spec`: OSD spec to apply, in YAML (recommended) or dict format
63 | Example:
64 | ```
65 | cephadm_osd_spec: |
66 | service_type: osd
67 | service_id: osd_spec_default
68 | placement:
69 | host_pattern: '*'
70 | data_devices:
71 | model: MZ7KH960HAJR0D3
72 | db_devices:
73 | model: Dell Express Flash PM1725b 1.6TB SFF
74 | ```
75 | * RGWs
76 | * `cephadm_radosgw_services`: List of RADOS Gateway services to deploy. `id` is an arbitrary name for the service,
77 | `count_per_host` is the desired number of RGW services per host. `networks` is an optional list of networks to bind to.
78 | `spec` is an optional additional service specification. The previously undocumented `port` variable is no longer supported.
79 | Example:
80 | ```
81 | cephadm_radosgw_services:
82 | - id: myrgw
83 | count_per_host: 2
84 | networks:
85 | - 10.66.0.0/24
86 | spec:
87 | rgw_realm: myrealm
88 | rgw_zone: myzone
89 | rgw_frontend_port: 1234
90 | ```
91 | * Ingress
92 | * `cephadm_ingress_services`: List of ingress services to deploy. `id` should match the name (not the id) of the RGW service to
93 | which the ingress will point. `spec` is a service specification required by cephadm to deploy the ingress (haproxy +
94 | keepalived pair).
95 | Example:
96 | ```
97 | cephadm_ingress_services:
98 | - id: rgw.myrgw
99 | spec:
100 | frontend_port: 443
101 | monitor_port: 1967
102 | virtual_ip: 10.66.0.1/24
103 | ssl_cert: {example_certificate_chain}
104 | ```
105 | Note that adding RGW or other services to an existing deployment requires setting the `cephadm_bootstrap` variable to *true*.
106 | 
107 | * Registry
108 | * `cephadm_registry_url`: (default: not used)
109 | * `cephadm_registry_username`: (default: not used)
110 | * `cephadm_registry_password`: (default: not used)
111 | 
112 | * Networking
113 | * Mandatory
114 | * `cephadm_public_interface`: Public interface (mandatory)
115 | * `cephadm_public_network`: Public network including CIDR (mandatory)
116 | * Optional
117 | * `cephadm_admin_interface`: Admin interface (default: use `cephadm_public_interface`)
118 | * `cephadm_cluster_interface`: Cluster interface (optional - if not defined Ceph will not use a dedicated cluster network)
119 | * `cephadm_cluster_network`: Cluster network including CIDR (optional - if not defined Ceph will not use a dedicated cluster network)
120 | 
--------------------------------------------------------------------------------
/roles/cephadm/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | cephadm_ceph_release: reef
3 | cephadm_skip_prechecks: false
4 | # FSID
5 | cephadm_fsid: ""
6 | # Recreate cluster
7 | cephadm_recreate: false
8 | # Packages
9 | # NOTE(seunghun1ee): Set cephadm_custom_repos to true by default when host OS
10 | # is Ubuntu Noble.
11 | # This is because Ceph images for Noble are coming from Ubuntu repository 12 | # https://packages.ubuntu.com/noble-updates/cephadm 13 | cephadm_custom_repos: "{{ ansible_facts.distribution_release == 'noble' }}" 14 | cephadm_package_update: false 15 | # Images 16 | cephadm_image: "" 17 | cephadm_haproxy_image: "" 18 | cephadm_keepalived_image: "" 19 | # Registry 20 | cephadm_registry_url: "" 21 | cephadm_registry_username: "" 22 | cephadm_registry_password: "" 23 | # Bootstrap settings 24 | cephadm_bootstrap_host: "{{ groups['mons'][0] }}" 25 | cephadm_enable_dashboard: false 26 | cephadm_enable_firewalld: false 27 | cephadm_enable_monitoring: false 28 | cephadm_host_labels: [] 29 | cephadm_install_ceph_cli: false 30 | cephadm_ssh_public_key: "/etc/ceph/cephadm.pub" 31 | cephadm_ssh_private_key: "/etc/ceph/cephadm.id" 32 | cephadm_ssh_user: "{{ ansible_user }}" 33 | cephadm_bootstrap_additional_parameters: "" 34 | cephadm_apt_repo_dist: "{{ ansible_facts.distribution_release }}" 35 | # MONs and MGRs 36 | cephadm_mon_count: "{{ groups.get('mons', []) | length }}" 37 | cephadm_mgr_count: "{{ groups.get('mgrs', []) | length }}" 38 | # Networking 39 | cephadm_admin_interface: "{{ cephadm_public_interface }}" 40 | cephadm_public_interface: "" 41 | cephadm_cluster_interface: "" 42 | cephadm_public_network: "" 43 | cephadm_cluster_network: "" 44 | # OSDs 45 | cephadm_osd_devices: [] 46 | cephadm_osd_spec: [] 47 | # RADOSGW 48 | cephadm_radosgw_services: [] 49 | # Ingress 50 | cephadm_ingress_services: [] 51 | # Container Engine 52 | cephadm_container_engine: "docker" 53 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Bootstrap cephadm 3 | vars: 4 | mon_ip: "{{ hostvars[inventory_hostname].ansible_facts[cephadm_public_interface | replace('-', '_')].ipv4.address }}" 5 | monitoring_stack: "{{ '--skip-monitoring-stack' if not (cephadm_enable_monitoring | bool) else '' }}" 6 | dashboard: "{{ '--skip-dashboard' if not cephadm_enable_dashboard | bool else '' }}" 7 | firewalld: "{{ '--skip-firewalld' if not cephadm_enable_firewalld | bool else '' }}" 8 | command: 9 | cmd: > 10 | cephadm 11 | {% if cephadm_image | length > 0 %} 12 | --image={{ cephadm_image }} 13 | {% endif %} 14 | bootstrap 15 | {{ monitoring_stack }} 16 | {{ dashboard }} 17 | {{ firewalld }} 18 | --ssh-private-key={{ cephadm_ssh_private_key }} 19 | --ssh-public-key={{ cephadm_ssh_public_key }} 20 | {% if cephadm_cluster_network | length > 0 %} 21 | --cluster-network {{ cephadm_cluster_network }} 22 | {% endif %} 23 | {% if cephadm_ssh_user | length > 0 %} 24 | --ssh-user "{{ cephadm_ssh_user }}" 25 | {% endif %} 26 | {% if cephadm_registry_url | length > 0 %} 27 | --registry-url "{{ cephadm_registry_url }}" 28 | --registry-username "{{ cephadm_registry_username }}" 29 | --registry-password "{{ cephadm_registry_password }}" 30 | {% endif %} 31 | --skip-pull 32 | {% if cephadm_fsid | length > 0 %} 33 | --fsid={{ cephadm_fsid }} 34 | {% endif %} 35 | --mon-ip={{ mon_ip }} 36 | {{ cephadm_bootstrap_additional_parameters }} 37 | become: true 38 | changed_when: true 39 | when: not cephadm_check_ceph_conf.stat.exists 40 | 41 | - name: Set public network 42 | command: 43 | cmd: "cephadm shell -- ceph config set mon public_network {{ cephadm_public_network }}" 44 | become: true 45 | changed_when: true 46 | 47 | - name: Set cluster network 48 | command: 49 | cmd: "cephadm 
shell -- ceph config set global cluster_network {{ cephadm_cluster_network }}" 50 | when: cephadm_cluster_network | length > 0 51 | become: true 52 | changed_when: true 53 | 54 | - name: Set HAProxy image 55 | command: 56 | cmd: "cephadm shell -- ceph config set mgr mgr/cephadm/container_image_haproxy {{ cephadm_haproxy_image }}" 57 | when: cephadm_haproxy_image | length > 0 58 | become: true 59 | changed_when: true 60 | 61 | - name: Set Keepalived image 62 | command: 63 | cmd: "cephadm shell -- ceph config set mgr mgr/cephadm/container_image_keepalived {{ cephadm_keepalived_image }}" 64 | when: cephadm_keepalived_image | length > 0 65 | become: true 66 | changed_when: true 67 | 68 | - name: Get cluster fsid 69 | command: 70 | cmd: "cephadm shell -- ceph fsid" 71 | when: cephadm_fsid | length == 0 72 | become: true 73 | changed_when: false 74 | register: cephadm_fsid_current 75 | 76 | - name: Template out cluster.yml 77 | vars: 78 | fsid: "{{ cephadm_fsid if cephadm_fsid | length > 0 else cephadm_fsid_current.stdout }}" 79 | template: 80 | src: "templates/cluster.yml.j2" 81 | dest: "/var/run/ceph/{{ fsid }}/cephadm_cluster.yml" 82 | owner: root 83 | group: root 84 | mode: "0644" 85 | become: true 86 | run_once: true 87 | 88 | - name: Apply spec 89 | command: 90 | cmd: > 91 | cephadm shell -- 92 | ceph orch apply -i /var/run/ceph/cephadm_cluster.yml 93 | changed_when: true 94 | become: true 95 | 96 | - name: Install ceph cli on mon hosts 97 | command: 98 | cmd: "cephadm install ceph" 99 | become: true 100 | changed_when: true 101 | when: cephadm_install_ceph_cli 102 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get Ceph FSID 3 | command: 4 | cmd: "cephadm shell -- ceph fsid" 5 | become: true 6 | register: cephadm_destroy_fsid 7 | changed_when: false 8 | failed_when: false 9 | when: inventory_hostname == cephadm_bootstrap_host 10 | 11 | - name: Destroy cluster 12 | vars: 13 | fsid_result: "{{ hostvars[cephadm_bootstrap_host].cephadm_destroy_fsid }}" 14 | command: 15 | cmd: "cephadm rm-cluster --fsid {{ fsid_result.stdout }} --force" 16 | become: true 17 | changed_when: true 18 | when: fsid_result.rc != 1 19 | 20 | - name: Remove ssh keys 21 | file: 22 | path: "{{ item }}" 23 | state: absent 24 | with_items: 25 | - "{{ cephadm_ssh_private_key }}" 26 | - "{{ cephadm_ssh_public_key }}" 27 | become: true 28 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Destroy cephadm deployment" 3 | include_tasks: "destroy.yml" 4 | when: 5 | - cephadm_recreate | bool 6 | 7 | - name: "Run prechecks" 8 | include_tasks: "prechecks.yml" 9 | when: not cephadm_skip_prechecks | bool 10 | 11 | - name: "Install packages" 12 | include_tasks: "pkg_{{ ansible_facts.os_family | lower }}.yml" 13 | 14 | - name: "Bootstrap prereqs" 15 | include_tasks: "prereqs_bootstrap.yml" 16 | when: 17 | - inventory_hostname == cephadm_bootstrap_host 18 | 19 | - name: "Prereq tasks" 20 | import_tasks: "prereqs.yml" 21 | 22 | - name: "Bootstrap" 23 | include_tasks: "bootstrap.yml" 24 | when: 25 | - cephadm_bootstrap | bool 26 | - inventory_hostname == cephadm_bootstrap_host 27 | 28 | - name: "Add osds individually" 29 | import_tasks: "osds.yml" 30 | 31 | - name: "Ensure osd spec is defined" 32 | 
include_tasks: "osds_spec.yml" 33 | when: 34 | - cephadm_osd_spec | length > 0 35 | - inventory_hostname == cephadm_bootstrap_host 36 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/openstack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/osds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # We need to register this as a variable, otherwise it will not be interpreted 3 | # correctly when setting ansible_host in the next task. 4 | - name: Set a fact about the Ansible host 5 | set_fact: 6 | mon_ansible_host: "{{ hostvars[inventory_hostname].ansible_host | default(inventory_hostname) }}" 7 | 8 | - name: Add OSDs individually 9 | command: 10 | cmd: > 11 | cephadm shell -- 12 | ceph orch daemon add osd {{ ansible_facts.nodename }}:{{ item }} 13 | become: true 14 | register: osd_add_result 15 | changed_when: not osd_add_result.stdout.startswith("Created no osd(s) on host") 16 | delegate_to: "{{ omit if 'mons' in group_names else groups['mons'][0] }}" 17 | when: cephadm_osd_devices | length > 0 18 | with_items: "{{ cephadm_osd_devices }}" 19 | vars: 20 | # NOTE: Without this, the delegate hosts's ansible_host variable will not 21 | # be respected. 22 | ansible_host: "{{ mon_ansible_host if 'mons' in group_names else hostvars[groups['mons'][0]].ansible_host }}" 23 | until: osd_add_result.rc == 0 24 | retries: 3 25 | delay: 10 26 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/osds_spec.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get cluster fsid 3 | command: 4 | cmd: "cephadm shell -- ceph fsid" 5 | when: cephadm_fsid | length == 0 6 | become: true 7 | register: cephadm_fsid_current 8 | changed_when: false 9 | 10 | - name: Template out osd_spec.yml 11 | vars: 12 | fsid: "{{ cephadm_fsid if cephadm_fsid | length > 0 else cephadm_fsid_current.stdout }}" 13 | copy: 14 | content: "{{ cephadm_osd_spec | to_nice_yaml if cephadm_osd_spec is mapping else cephadm_osd_spec }}" 15 | dest: "/var/run/ceph/{{ fsid }}/osd_spec.yml" 16 | owner: root 17 | group: root 18 | mode: "0o644" 19 | become: true 20 | 21 | - name: Apply OSDs spec 22 | command: 23 | cmd: > 24 | cephadm shell -- 25 | ceph orch apply -i /var/run/ceph/osd_spec.yml 26 | become: true 27 | changed_when: true 28 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/pkg_debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Remove any old Ceph keys added to the main keyring. 
3 | - name: Clean up old key 4 | apt_key: 5 | id: E84AC2C0460F3994 6 | state: absent 7 | become: true 8 | 9 | - name: Ensure keys directory exists 10 | file: 11 | path: "{{ cephadm_apt_key_path | dirname }}" 12 | owner: root 13 | group: root 14 | mode: "0755" 15 | state: directory 16 | when: not cephadm_custom_repos | bool 17 | become: true 18 | 19 | - name: Ensure keys exist 20 | get_url: 21 | url: "{{ cephadm_apt_key_url }}" 22 | dest: "{{ cephadm_apt_key_path }}" 23 | owner: root 24 | group: root 25 | mode: "0644" 26 | when: not cephadm_custom_repos | bool 27 | become: true 28 | 29 | - name: Ensure Ceph repositories are defined 30 | apt_repository: 31 | repo: "deb [signed-by={{ cephadm_apt_key_path }}] https://download.ceph.com/debian-{{ item }}/ {{ cephadm_apt_repo_dist }} main" 32 | state: "{{ 'present' if item == cephadm_ceph_release else 'absent' }}" 33 | when: not cephadm_custom_repos | bool 34 | become: true 35 | loop: "{{ cephadm_ceph_releases }}" 36 | 37 | - name: Install cephadm package 38 | apt: 39 | name: "cephadm" 40 | state: "{{ 'latest' if cephadm_package_update | bool else 'present' }}" 41 | update_cache: true 42 | become: true 43 | timeout: 600 44 | ignore_errors: true 45 | register: cephadm_install 46 | retries: 3 47 | delay: 20 48 | 49 | # NOTE(Alex-Welsh): The cephadm install task sometimes hangs indefinitely after 50 | # the package is installed. This is a workaround to ensure the playbook 51 | # completes properly. 52 | - name: Ensure Cephadm package has installed properly 53 | apt: 54 | name: "cephadm" 55 | state: "{{ 'latest' if cephadm_package_update | bool else 'present' }}" 56 | update_cache: true 57 | become: true 58 | timeout: 60 59 | when: cephadm_install is failed 60 | -------------------------------------------------------------------------------- /roles/cephadm/tasks/pkg_redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure centos-release-ceph-octopus package is removed 3 | dnf: 4 | name: "centos-release-ceph-octopus" 5 | state: absent 6 | when: not cephadm_custom_repos | bool 7 | become: true 8 | 9 | - name: Add Ceph gpgkey 10 | rpm_key: 11 | key: "https://download.ceph.com/keys/release.asc" 12 | state: present 13 | when: not cephadm_custom_repos | bool 14 | become: true 15 | 16 | - name: Ensure Ceph repositories are defined 17 | yum_repository: 18 | file: "ceph" 19 | name: "ceph-{{ item.0 + '-' + item.1 if item.0 == 'noarch' else item.1 }}" 20 | description: "Ceph {{ item.1 }} repo {{ item.0 }}" 21 | baseurl: "https://download.ceph.com/rpm-{{ item.1 }}/el{{ ansible_facts.distribution_major_version }}/{{ item.0 }}" 22 | gpgcheck: true 23 | gpgkey: "https://download.ceph.com/keys/release.asc" 24 | state: "{{ 'present' if item.1 == cephadm_ceph_release else 'absent' }}" 25 | when: not cephadm_custom_repos | bool 26 | become: true 27 | loop: "{{ cephadm_rpm_repos | product(cephadm_ceph_releases) | list }}" 28 | 29 | - name: Install cephadm package 30 | dnf: 31 | name: "cephadm" 32 | install_weak_deps: false 33 | state: "{{ 'latest' if cephadm_package_update | bool else 'present' }}" 34 | update_cache: true 35 | become: true 36 | timeout: 600 37 | ignore_errors: true 38 | register: cephadm_install 39 | retries: 3 40 | delay: 20 41 | 42 | 43 | # NOTE(Alex-Welsh): The cephadm install task sometimes hangs indefinitely after 44 | # the package is installed. This is a workaround to ensure the playbook 45 | # completes properly. 
46 | - name: Ensure Cephadm package has installed properly
47 | dnf:
48 | name: "cephadm"
49 | install_weak_deps: false
50 | state: "{{ 'latest' if cephadm_package_update | bool else 'present' }}"
51 | update_cache: true
52 | become: true
53 | timeout: 60
54 | when: cephadm_install is failed
55 | 
--------------------------------------------------------------------------------
/roles/cephadm/tasks/prechecks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Populate service facts
3 | service_facts:
4 | 
5 | - name: Set cephadm_bootstrap
6 | set_fact:
7 | cephadm_bootstrap: "{{ ansible_facts.services | dict2items | selectattr('key', 'match', '^ceph.*') | list | length == 0 }}"
8 | 
9 | - name: Check if specified container engine is installed
10 | command: "which {{ cephadm_container_engine }}"
11 | changed_when: false
12 | 
--------------------------------------------------------------------------------
/roles/cephadm/tasks/prereqs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy cephadm public key to all hosts
3 | ansible.posix.authorized_key:
4 | user: "{{ cephadm_ssh_user }}"
5 | state: present
6 | key: "{{ hostvars[cephadm_bootstrap_host].cephadm_ssh_public_key_content }}"
7 | when: "cephadm_ssh_user | length > 0"
8 | become: true
9 | 
10 | - name: Ensure the Logrotate package is installed
11 | package:
12 | name: logrotate
13 | state: present
14 | become: true
15 | 
16 | - name: Log into container registry with Podman
17 | containers.podman.podman_login:
18 | registry: "{{ cephadm_registry_url }}"
19 | username: "{{ cephadm_registry_username }}"
20 | password: "{{ cephadm_registry_password }}"
21 | when:
22 | - cephadm_registry_username | length > 0
23 | - cephadm_container_engine == 'podman'
24 | become: true
25 | 
26 | - name: Log into container registry with Docker
27 | docker_login:
28 | registry: "{{ cephadm_registry_url }}"
29 | username: "{{ cephadm_registry_username }}"
30 | password: "{{ cephadm_registry_password }}"
31 | when:
32 | - cephadm_registry_username | length > 0
33 | - cephadm_container_engine == 'docker'
34 | become: true
35 | 
36 | - name: Pull ceph image with Podman
37 | containers.podman.podman_image:
38 | name: "{{ cephadm_image }}"
39 | state: present
40 | when:
41 | - cephadm_image | length > 0
42 | - cephadm_container_engine == 'podman'
43 | become: true
44 | 
45 | - name: Pull ceph image with Docker
46 | community.docker.docker_image:
47 | source: pull
48 | name: "{{ cephadm_image }}"
49 | state: present
50 | when:
51 | - cephadm_image | length > 0
52 | - cephadm_container_engine == 'docker'
53 | become: true
54 | 
--------------------------------------------------------------------------------
/roles/cephadm/tasks/prereqs_bootstrap.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure /etc/ceph directory exists
3 | file:
4 | path: /etc/ceph
5 | state: directory
6 | owner: root
7 | group: root
8 | mode: "0o755"
9 | become: true
10 | 
11 | - name: Check if /etc/ceph/ceph.conf exists
12 | stat:
13 | path: /etc/ceph/ceph.conf
14 | register: cephadm_check_ceph_conf
15 | 
16 | - name: Check if cephadm ssh key exists
17 | stat:
18 | path: "{{ cephadm_ssh_private_key }}"
19 | register: cephadm_check_ceph_id
20 | 
21 | - name: Generate ssh key for cephadm
22 | openssh_keypair:
23 | path: "{{ cephadm_ssh_private_key }}"
24 | size: 4096
25 | comment: "ceph-{{ cephadm_fsid }}"
26 | when: not cephadm_check_ceph_id.stat.exists
27 |
register: cephadm_ssh_key 28 | become: true 29 | 30 | - name: Save public key 31 | copy: 32 | content: "{{ cephadm_ssh_key.public_key | default }}" 33 | dest: "{{ cephadm_ssh_public_key }}" 34 | owner: root 35 | group: root 36 | mode: "0o644" 37 | become: true 38 | when: not cephadm_check_ceph_id.stat.exists 39 | 40 | - name: Slurp public key 41 | slurp: 42 | src: "{{ cephadm_ssh_public_key }}" 43 | register: cephadm_ssh_public_key_slurp 44 | when: cephadm_check_ceph_id.stat.exists 45 | 46 | - name: Set a fact about the SSH public key 47 | set_fact: 48 | cephadm_ssh_public_key_content: >- 49 | {{ cephadm_ssh_public_key_slurp.content | b64decode if cephadm_check_ceph_id.stat.exists else cephadm_ssh_key.public_key }} 50 | -------------------------------------------------------------------------------- /roles/cephadm/templates/cluster.yml.j2: -------------------------------------------------------------------------------- 1 | {% for host in groups['ceph'] %} 2 | --- 3 | service_type: host 4 | hostname: {{ hostvars[host].ansible_facts.nodename }} 5 | {% set cephadm_admin_interface = hostvars[host]['cephadm_admin_interface'] %} 6 | addr: {{ hostvars[host]['ansible_facts'][cephadm_admin_interface | replace('-', '_')]['ipv4']['address'] }} 7 | labels: 8 | {% if host in groups['mons'] %} 9 | - _admin 10 | - mon 11 | {% endif %} 12 | {% if host in groups['mgrs'] %} 13 | - mgr 14 | {% endif %} 15 | {% if host in groups['osds'] %} 16 | - osd 17 | {% endif %} 18 | {% if host in groups.get('rgws', []) %} 19 | - rgw 20 | {% endif %} 21 | {% if host in groups.get('ingress', []) %} 22 | - ingress 23 | {% endif %} 24 | {% if hostvars[host].get('cephadm_host_labels', []) | length > 0 %} 25 | {{ hostvars[host].get('cephadm_host_labels', []) }} 26 | {% endif %} 27 | {% endfor %} 28 | --- 29 | service_type: mon 30 | placement: 31 | count: {{ cephadm_mon_count }} 32 | label: mon 33 | --- 34 | service_type: mgr 35 | placement: 36 | count: {{ cephadm_mgr_count }} 37 | label: mgr 38 | --- 39 | service_type: crash 40 | placement: 41 | host_pattern: "*" 42 | {% if groups.get('rgws', []) | length > 0 %} 43 | {% for service in cephadm_radosgw_services %} 44 | --- 45 | service_type: rgw 46 | service_id: {{ service.id }} 47 | placement: 48 | label: rgw 49 | {% if service.count_per_host is defined %} 50 | count_per_host: {{ service.count_per_host }} 51 | {% endif %} 52 | {% if service.networks is defined %} 53 | {% if service.networks is string %} 54 | networks: 55 | - "{{ service.networks }}" 56 | {% else %} 57 | networks: 58 | {{ service.networks | to_nice_yaml }} 59 | {% endif %} 60 | {% endif %} 61 | {% if service.spec is defined %} 62 | {{ {"spec": service.spec} | to_nice_yaml(indent=2) }} 63 | {% endif %} 64 | {% endfor %} 65 | {% endif %} 66 | {% if groups.get('ingress', []) | length > 0 %} 67 | {% for service in cephadm_ingress_services %} 68 | --- 69 | service_type: ingress 70 | service_id: {{ service.id }} 71 | placement: 72 | label: ingress 73 | {{ {"spec": service.spec} | to_nice_yaml(indent=2) }} 74 | {% endfor %} 75 | {% endif %} 76 | -------------------------------------------------------------------------------- /roles/cephadm/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cephadm_rpm_repos: 3 | - "$basearch" 4 | - "noarch" 5 | cephadm_ceph_releases: 6 | - octopus 7 | - pacific 8 | - quincy 9 | - reef 10 | - squid 11 | cephadm_apt_key_url: "https://download.ceph.com/keys/release.asc" 12 | cephadm_apt_key_path: 
"/usr/local/share/keyrings/ceph.asc" 13 | -------------------------------------------------------------------------------- /roles/commands/README.md: -------------------------------------------------------------------------------- 1 | # commands 2 | 3 | This role executes arbitrary commands against a Ceph cluster using `cephadm`. 4 | 5 | ## Prerequisites 6 | 7 | ### Host prerequisites 8 | 9 | * The role assumes target hosts connection over SSH with user that has passwordless sudo configured. 10 | * Either direct Internet access or private registry with desired Ceph image accessible to all hosts is required. 11 | 12 | ### Inventory 13 | 14 | This role assumes the existence of the following groups: 15 | 16 | * `mons` 17 | 18 | with at least one host in it - see the `cephadm` role for more details. 19 | 20 | ## Role variables 21 | 22 | 23 | * `cephadm_command`: The command to use with the list of commands to execute - defaults to `ceph`, but can be any command found in the `quay.io/ceph/ceph:` image. 24 | Example: 25 | ``` 26 | cephadm_command: radosgw-admin 27 | ``` 28 | 29 | * `cephadm_commands`: A list of commands to pass to `cephadm shell -- {{ cephadm_command }}` 30 | Example: 31 | ``` 32 | cephadm_commands: 33 | - "fs new cephfs cephfs_metadata cephfs_data" 34 | - "orch apply mds cephfs --placement 3" 35 | ``` 36 | 37 | * `cephadm_commands_until` A expression to evaluate to allow retrying commands. May reference the registered result variable, `cephadm_commands_result`. Default is `true` (do not use retries). 38 | 39 | * `cephadm_commands_retries`: Number of retries to use with `cephadm_commands_until`. Default is 0. 40 | 41 | * `cephadm_commands_delay`: Delay between retries with `cephadm_commands_until`. Default is 0. 42 | -------------------------------------------------------------------------------- /roles/commands/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cephadm_command: ceph 3 | cephadm_commands: [] 4 | cephadm_commands_until: true 5 | cephadm_commands_retries: 0 6 | cephadm_commands_delay: 0 7 | -------------------------------------------------------------------------------- /roles/commands/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Execute custom commands 3 | command: 4 | cmd: "cephadm shell -- {{ cephadm_command }} {{ item }}" 5 | register: cephadm_commands_result 6 | with_items: "{{ cephadm_commands }}" 7 | become: true 8 | changed_when: true 9 | when: cephadm_commands | length > 0 10 | delegate_to: "{{ groups['mons'][0] }}" 11 | run_once: true 12 | until: cephadm_commands_until 13 | retries: "{{ cephadm_commands_retries }}" 14 | delay: "{{ cephadm_commands_delay }}" 15 | vars: 16 | # NOTE: Without this, the delegate hosts's ansible_host variable will not 17 | # be respected. 18 | ansible_host: "{{ hostvars[groups['mons'][0]].ansible_host | default(inventory_hostname) }}" 19 | -------------------------------------------------------------------------------- /roles/crush_rules/README.md: -------------------------------------------------------------------------------- 1 | # crush_rules 2 | 3 | This role creates/deletes Ceph crush rules. 4 | 5 | ## Prerequisites 6 | 7 | ### Host prerequisites 8 | 9 | * The role assumes target hosts connection over SSH with user that has passwordless sudo configured. 10 | * Either direct Internet access or private registry with desired Ceph image accessible to all hosts is required. 
11 | 
12 | ### Inventory
13 | 
14 | This role assumes the existence of the following groups:
15 | 
16 | * `mons`
17 | 
18 | All Ceph hosts must be in the `ceph` group.
19 | 
20 | ## Role variables
21 | 
22 | * `cephadm_crush_rules`: A list of CRUSH rules to define
23 | Example:
24 | ```
25 | cephadm_crush_rules:
26 | - name: replicated_hdd
27 | bucket_root: default
28 | bucket_type: host
29 | device_class: hdd
30 | rule_type: replicated
31 | state: present
32 | - name: replicated_ssd
33 | bucket_root: default
34 | bucket_type: host
35 | device_class: ssd
36 | rule_type: replicated
37 | state: present
38 | - name: ec_ssd
39 | rule_type: erasure
40 | profile: ec_4_2_ssd
41 | state: present
42 | ```
43 | 
44 | Check the `cephadm_crush_rule` module docs for supported key options.
45 | 
46 | 
--------------------------------------------------------------------------------
/roles/crush_rules/defaults/main.yml:
--------------------------------------------------------------------------------
1 | cephadm_crush_rules: []
2 | 
--------------------------------------------------------------------------------
/roles/crush_rules/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure Ceph CRUSH rules are defined
3 | cephadm_crush_rule:
4 | name: "{{ item.name }}"
5 | state: "{{ item.state | default(omit) }}"
6 | rule_type: "{{ item.rule_type | default(omit) }}"
7 | bucket_root: "{{ item.bucket_root | default(omit) }}"
8 | bucket_type: "{{ item.bucket_type | default(omit) }}"
9 | device_class: "{{ item.device_class | default(omit) }}"
10 | profile: "{{ item.profile | default(omit) }}"
11 | with_items: "{{ cephadm_crush_rules }}"
12 | delegate_to: "{{ groups['mons'][0] }}"
13 | run_once: true
14 | 
--------------------------------------------------------------------------------
/roles/ec_profiles/README.md:
--------------------------------------------------------------------------------
1 | # ec_profiles
2 | 
3 | This role creates/deletes Ceph EC profiles.
4 | 
5 | ## Prerequisites
6 | 
7 | ### Host prerequisites
8 | 
9 | * The role assumes connection over SSH to the first MON server.
10 | 
11 | ### Inventory
12 | 
13 | This role assumes the existence of the following groups:
14 | 
15 | * `mons`
16 | 
17 | ## Role variables
18 | 
19 | * `cephadm_ec_profiles`: A list of EC profiles to manage.
20 | Example:
21 | ```
22 | cephadm_ec_profiles:
23 | - name: foo
24 | k: 4
25 | m: 2
26 | - name: delete_me
27 | state: absent
28 | - name: foo-osd
29 | k: 4
30 | m: 2
31 | crush_failure_domain: osd
32 | 
33 | ```
34 | 
35 | Check the Erasure Code profile [docs](https://docs.ceph.com/en/reef/rados/operations/erasure-code-profile/#osd-erasure-code-profile-set) for supported key options.
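As a usage sketch (the play layout, the play-level `become`, and the profile values are illustrative), the role could be applied like so:

```
- name: Manage Ceph EC profiles
  hosts: mons
  become: true
  roles:
    - role: stackhpc.cephadm.ec_profiles
      cephadm_ec_profiles:
        - name: ec_4_2_ssd
          k: 4
          m: 2
          crush_device_class: ssd
```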
36 | -------------------------------------------------------------------------------- /roles/ec_profiles/defaults/main.yml: -------------------------------------------------------------------------------- 1 | cephadm_ec_profiles: [] 2 | -------------------------------------------------------------------------------- /roles/ec_profiles/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure Ceph EC profiles are defined 3 | cephadm_ec_profile: 4 | name: "{{ item.name }}" 5 | state: "{{ item.state | default(omit) }}" 6 | stripe_unit: "{{ item.stripe_unit | default(omit) }}" 7 | k: "{{ item.k }}" 8 | m: "{{ item.m }}" 9 | plugin: "{{ item.plugin | default(omit) }}" 10 | directory: "{{ item.directory | default(omit) }}" 11 | crush_root: "{{ item.crush_root | default(omit) }}" 12 | crush_device_class: "{{ item.crush_device_class | default(omit) }}" 13 | crush_failure_domain: "{{ item.crush_failure_domain | default(omit) }}" 14 | with_items: "{{ cephadm_ec_profiles }}" 15 | delegate_to: "{{ groups['mons'][0] }}" 16 | run_once: true 17 | -------------------------------------------------------------------------------- /roles/enter_maintenance/README.md: -------------------------------------------------------------------------------- 1 | # enter_maintenance 2 | 3 | This role places Ceph hosts into maintenance mode using `cephadm`. 4 | 5 | ## Prerequisites 6 | 7 | This role should be executed on one host at a time. This can be achieved by 8 | adding `serial: 1` to a play. 9 | 10 | ### Host prerequisites 11 | 12 | * The role assumes target hosts connection over SSH with user that has passwordless sudo configured. 13 | * Either direct Internet access or private registry with desired Ceph image accessible to all hosts is required. 14 | 15 | ### Inventory 16 | 17 | This role assumes the existence of the following groups: 18 | 19 | * `mons` 20 | 21 | with at least one host in it - see the `cephadm` role for more details. 22 | -------------------------------------------------------------------------------- /roles/enter_maintenance/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cephadm_hostname: "{{ ansible_facts.nodename }}" 3 | -------------------------------------------------------------------------------- /roles/enter_maintenance/tasks/enter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if host can enter maintenance mode 3 | ansible.builtin.include_role: 4 | name: stackhpc.cephadm.commands 5 | vars: 6 | cephadm_commands: 7 | - "orch host ok-to-stop {{ cephadm_hostname }}" 8 | 9 | # Annoyingly, 'ceph orch host ok-to-stop' does not exit non-zero when 10 | # it is not OK to stop, so we need to check for specific messages. 
11 | - name: Assert that it is safe to stop host 12 | ansible.builtin.assert: 13 | that: 14 | # This one is seen for monitors 15 | - "'It is NOT safe' not in cephadm_commands_result.results[0].stderr" 16 | # This one is seen for OSDs 17 | - "'unsafe to stop' not in cephadm_commands_result.results[0].stderr" 18 | fail_msg: "{{ cephadm_commands_result.results[0].stderr }}" 19 | 20 | - name: Fail over Ceph manager 21 | ansible.builtin.include_role: 22 | name: stackhpc.cephadm.commands 23 | vars: 24 | cephadm_commands: 25 | - "mgr fail" 26 | when: '"Cannot stop active Mgr daemon" in cephadm_commands_result.results[0].stderr' 27 | 28 | # RADOS Gateway services prevent a host from entering maintenance. 29 | # Remove the rgw label from the host and wait for Ceph orchestrator to remove 30 | # the service from the host. 31 | - name: Stop RADOS Gateway service 32 | when: "'rgws' in group_names" 33 | block: 34 | - name: Ensure rgw label has been removed from node 35 | ansible.builtin.include_role: 36 | name: stackhpc.cephadm.commands 37 | vars: 38 | cephadm_commands: 39 | - "orch host label rm {{ cephadm_hostname }} rgw" 40 | 41 | - name: Wait for RADOS Gateway service to stop 42 | ansible.builtin.include_role: 43 | name: stackhpc.cephadm.commands 44 | vars: 45 | cephadm_commands: 46 | - "orch ls rgw --format json-pretty" 47 | cephadm_commands_until: >- 48 | {{ (cephadm_commands_result.stdout | from_json)[0].status.running == 49 | (cephadm_commands_result.stdout | from_json)[0].status.size }} 50 | cephadm_commands_retries: 30 51 | cephadm_commands_delay: 10 52 | 53 | - name: Ensure host is in maintenance mode 54 | block: 55 | - name: Ensure host is in maintenance mode 56 | ansible.builtin.include_role: 57 | name: stackhpc.cephadm.commands 58 | vars: 59 | cephadm_commands: 60 | - "orch host maintenance enter {{ cephadm_hostname }}" 61 | always: 62 | - name: Ensure rgw label has been added to node 63 | ansible.builtin.include_role: 64 | name: stackhpc.cephadm.commands 65 | vars: 66 | cephadm_commands: 67 | - "orch host label add {{ cephadm_hostname }} rgw" 68 | when: "'rgws' in group_names" 69 | -------------------------------------------------------------------------------- /roles/enter_maintenance/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Assert that execution is serialised 3 | ansible.builtin.assert: 4 | that: 5 | - ansible_play_batch | length == 1 6 | fail_msg: >- 7 | Hosts must be placed into maintenance one at a time in order to first check 8 | whether it is safe to stop them. 9 | 10 | - name: List hosts in maintenance 11 | ansible.builtin.include_role: 12 | name: stackhpc.cephadm.commands 13 | vars: 14 | cephadm_commands: 15 | - "orch host ls --format json-pretty --host_status maintenance" 16 | 17 | # Entering maintenance fails if the host is already in maintenance. 18 | - name: Enter maintenance 19 | ansible.builtin.include_tasks: enter.yml 20 | when: cephadm_hostname not in cephadm_hosts_in_maintenance 21 | vars: 22 | cephadm_hosts_in_maintenance: >- 23 | {{ cephadm_commands_result.results[0].stdout | 24 | from_json | 25 | map(attribute='hostname') | 26 | list }} 27 | -------------------------------------------------------------------------------- /roles/exit_maintenance/README.md: -------------------------------------------------------------------------------- 1 | # exit_maintenance 2 | 3 | This role removes Ceph hosts from maintenance mode using `cephadm`. 
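For example, a sketch of a play applying this role one host at a time (the `osds` host pattern is illustrative):

```
- name: Remove Ceph hosts from maintenance
  hosts: osds
  serial: 1
  roles:
    - role: stackhpc.cephadm.exit_maintenance
```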
4 | 5 | ## Prerequisites 6 | 7 | This role should be executed on one host at a time. This can be achieved by 8 | adding `serial: 1` to a play. 9 | 10 | ### Host prerequisites 11 | 12 | * The role assumes target hosts connection over SSH with user that has passwordless sudo configured. 13 | * Either direct Internet access or private registry with desired Ceph image accessible to all hosts is required. 14 | 15 | ### Inventory 16 | 17 | This role assumes the existence of the following groups: 18 | 19 | * `mons` 20 | 21 | with at least one host in it - see the `cephadm` role for more details. 22 | -------------------------------------------------------------------------------- /roles/exit_maintenance/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cephadm_hostname: "{{ ansible_facts.nodename }}" 3 | -------------------------------------------------------------------------------- /roles/exit_maintenance/tasks/exit.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure host has exited maintenance mode 3 | ansible.builtin.include_role: 4 | name: stackhpc.cephadm.commands 5 | vars: 6 | cephadm_commands: 7 | - "orch host maintenance exit {{ cephadm_hostname }}" 8 | -------------------------------------------------------------------------------- /roles/exit_maintenance/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Assert that execution is serialised 3 | ansible.builtin.assert: 4 | that: 5 | - ansible_play_batch | length == 1 6 | fail_msg: >- 7 | Hosts must be removed from maintenance one at a time. 8 | 9 | - name: List hosts 10 | ansible.builtin.include_role: 11 | name: stackhpc.cephadm.commands 12 | vars: 13 | cephadm_commands: 14 | - "orch host ls --format json-pretty" 15 | 16 | # Exiting maintenance fails if the host is not in maintenance or offline. 17 | - name: Exit maintenance 18 | ansible.builtin.include_tasks: exit.yml 19 | when: cephadm_host_status.status | lower in ["maintenance", "offline"] 20 | vars: 21 | cephadm_host_status: >- 22 | {{ cephadm_commands_result.results[0].stdout | 23 | from_json | 24 | selectattr('hostname', 'equalto', cephadm_hostname) | 25 | first }} 26 | -------------------------------------------------------------------------------- /roles/keys/README.md: -------------------------------------------------------------------------------- 1 | # keys 2 | 3 | This role creates/deletes Ceph keys (cephx). 4 | 5 | ## Prerequisites 6 | 7 | ### Host prerequisites 8 | 9 | * The role assumes target hosts connection over SSH with user that has passwordless sudo configured. 10 | * Either direct Internet access or private registry with desired Ceph image accessible to all hosts is required. 11 | 12 | ### Inventory 13 | 14 | This role assumes the existence of the following groups: 15 | 16 | * `mons` 17 | 18 | ## Role variables 19 | 20 | * `cephadm_keys`: A list of keys to define 21 | Example: 22 | ```yaml 23 | cephadm_keys: 24 | - name: client.glance 25 | caps: 26 | mon: "profile rbd" 27 | osd: "profile rbd pool=images" 28 | mgr: "profile rbd pool=images" 29 | - name: client.user2 30 | caps: 31 | mon: "allow r" 32 | mgr: "allow rw" 33 | state: absent 34 | ``` 35 | 36 | Check the `cephadm_key` module docs for supported key options. 37 | 38 | * Keyrings are never written to disk on Ceph hosts by tasks in this role. 
If a Cephadm keyring should 39 | be written to the filesystem the following approach can be taken: 40 | ```yaml 41 | - name: Get Ceph keys 42 | stackhpc.cephadm.cephadm_key: 43 | name: client.glance 44 | output_format: plain 45 | state: info 46 | register: cephadm_key_info 47 | become: true 48 | 49 | - name: Write Ceph keys to disk 50 | vars: 51 | cephadm_key: "{{ cephadm_key_info.stdout }}" 52 | cephadm_user: "{{ cephadm_key_info.name }}" 53 | copy: 54 | # Include a trailing newline. 55 | content: | 56 | {{ cephadm_key }} 57 | dest: "/etc/ceph/ceph.{{ cephadm_user }}.keyring" 58 | become: true 59 | ``` -------------------------------------------------------------------------------- /roles/keys/defaults/main.yml: -------------------------------------------------------------------------------- 1 | cephadm_keys: [] 2 | -------------------------------------------------------------------------------- /roles/keys/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure Ceph cephx keys are defined 3 | cephadm_key: 4 | name: "{{ item.name }}" 5 | state: "{{ item.state | default(omit) }}" 6 | caps: "{{ item.caps }}" 7 | secret: "{{ item.key | default(omit) }}" 8 | with_items: "{{ cephadm_keys }}" 9 | delegate_to: "{{ groups['mons'][0] }}" 10 | run_once: true 11 | -------------------------------------------------------------------------------- /roles/pools/README.md: -------------------------------------------------------------------------------- 1 | # pools 2 | 3 | This role creates/deletes Ceph pools. 4 | 5 | ## Prerequisites 6 | 7 | ### Host prerequisites 8 | 9 | * The role assumes target hosts connection over SSH with user that has passwordless sudo configured. 10 | * Either direct Internet access or private registry with desired Ceph image accessible to all hosts is required. 11 | 12 | ### Inventory 13 | 14 | This role assumes the existence of the following groups: 15 | 16 | * `mons` 17 | 18 | All Ceph hosts must be in the `ceph` group. 19 | 20 | ## Role variables 21 | 22 | * `cephadm_pools`: A list of pools to define 23 | Example: 24 | ``` 25 | cephadm_pools: 26 | - name: pool1 27 | size: 3 28 | application: rbd 29 | - name: pool2 30 | size: 2 31 | application: rbd 32 | state: absent 33 | ``` 34 | 35 | Check the `cephadm_pool` module docs for supported pool options. 
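A play applying this role might look like the following sketch (the pool names and values are illustrative):

```
- name: Manage Ceph pools
  hosts: mons
  roles:
    - role: stackhpc.cephadm.pools
      cephadm_pools:
        - name: images
          size: 3
          application: rbd
```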
58 | 
--------------------------------------------------------------------------------
/roles/pools/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | cephadm_pools: []
3 | 
--------------------------------------------------------------------------------
/roles/pools/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure Ceph pools are defined
3 |   cephadm_pool:
4 |     name: "{{ item.name }}"
5 |     state: "{{ item.state | default(omit) }}"
6 |     pg_num: "{{ item.pg_num | default(omit) }}"
7 |     pgp_num: "{{ item.pgp_num | default(omit) }}"
8 |     size: "{{ item.size | default(omit) }}"
9 |     min_size: "{{ item.min_size | default(omit) }}"
10 |     pool_type: "{{ item.pool_type | default('replicated') }}"
11 |     rule_name: "{{ item.rule_name | default(omit) }}"
12 |     erasure_profile: "{{ item.erasure_profile | default(omit) }}"
13 |     pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
14 |     target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
15 |     application: "{{ item.application | default(omit) }}"
16 |     allow_ec_overwrites: "{{ item.allow_ec_overwrites | default(omit) }}"
17 |   with_items: "{{ cephadm_pools }}"
18 |   delegate_to: "{{ groups['mons'][0] }}"
19 |   run_once: true
20 |   become: true
21 | 
--------------------------------------------------------------------------------
/test-requirements.txt:
--------------------------------------------------------------------------------
1 | ansible>=2.9
2 | ansible-lint<7
3 | antsibull-changelog
4 | mock
5 | pytest
6 | pytest-forked
7 | pytest-xdist
8 | 
--------------------------------------------------------------------------------
/tests/sanity/ignore-2.14.txt:
--------------------------------------------------------------------------------
1 | ignore.txt
--------------------------------------------------------------------------------
/tests/sanity/ignore-2.15.txt:
--------------------------------------------------------------------------------
1 | ignore.txt
--------------------------------------------------------------------------------
/tests/sanity/ignore-2.16.txt:
--------------------------------------------------------------------------------
1 | ignore.txt
--------------------------------------------------------------------------------
/tests/sanity/ignore-2.17.txt:
--------------------------------------------------------------------------------
1 | ignore.txt
--------------------------------------------------------------------------------
/tests/sanity/ignore-2.18.txt:
--------------------------------------------------------------------------------
1 | ignore.txt
--------------------------------------------------------------------------------
/tests/sanity/ignore.txt:
--------------------------------------------------------------------------------
1 | plugins/modules/cephadm_pool.py pylint:disallowed-name
2 | plugins/modules/cephadm_crush_rule.py validate-modules:missing-gplv3-license
3 | plugins/modules/cephadm_crush_rule.py validate-modules:parameter-state-invalid-choice
4 | plugins/modules/cephadm_crush_rule.py validate-modules:invalid-documentation
5 | plugins/modules/cephadm_ec_profile.py validate-modules:invalid-documentation
6 | plugins/modules/cephadm_ec_profile.py validate-modules:missing-gplv3-license
7 | plugins/modules/cephadm_key.py validate-modules:invalid-documentation
8 | plugins/modules/cephadm_key.py validate-modules:missing-gplv3-license
9 | plugins/modules/cephadm_key.py validate-modules:parameter-state-invalid-choice
10 | plugins/modules/cephadm_pool.py validate-modules:missing-gplv3-license
11 | plugins/modules/cephadm_pool.py validate-modules:parameter-state-invalid-choice
12 | plugins/modules/cephadm_pool.py validate-modules:invalid-documentation
13 | plugins/modules/cephadm_pool.py validate-modules:doc-default-does-not-match-spec
14 | 
--------------------------------------------------------------------------------
/tests/unit/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stackhpc/ansible-collection-cephadm/0035b8ea95843ab8edcb818e8e87085157866402/tests/unit/modules/__init__.py
--------------------------------------------------------------------------------
/tests/unit/modules/cephadm_test_common.py:
--------------------------------------------------------------------------------
1 | from ansible.module_utils import basic
2 | from ansible.module_utils._text import to_bytes
3 | import json
4 | 
5 | 
6 | def set_module_args(args):
7 |     """Prepare arguments so that they will be picked up during module creation."""
8 |     if '_ansible_remote_tmp' not in args:
9 |         args['_ansible_remote_tmp'] = '/tmp'
10 |     if '_ansible_keep_remote_files' not in args:
11 |         args['_ansible_keep_remote_files'] = False
12 | 
13 |     args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
14 |     basic._ANSIBLE_ARGS = to_bytes(args)
15 | 
16 | 
17 | class AnsibleExitJson(Exception):
18 |     """Exception raised in place of AnsibleModule.exit_json()."""
19 | 
20 | 
21 | class AnsibleFailJson(Exception):
22 |     """Exception raised in place of AnsibleModule.fail_json()."""
23 | 
24 | 
25 | def exit_json(*args, **kwargs):
26 |     """Mock replacement for AnsibleModule.exit_json() that raises instead of exiting."""
27 |     raise AnsibleExitJson(kwargs)
28 | 
29 | 
30 | def fail_json(*args, **kwargs):
31 |     """Mock replacement for AnsibleModule.fail_json() that raises instead of exiting."""
32 |     raise AnsibleFailJson(kwargs)
33 | 
--------------------------------------------------------------------------------
/tests/unit/modules/test_cephadm_crush_rule.py:
--------------------------------------------------------------------------------
1 | # Make coding more python3-ish
2 | from __future__ import (absolute_import, division, print_function)
3 | __metaclass__ = type
4 | 
5 | import pytest
6 | 
7 | from . import cephadm_test_common
8 | from ansible_collections.stackhpc.cephadm.plugins.modules import cephadm_crush_rule
9 | from mock.mock import patch
10 | 
11 | fake_cluster = 'ceph'
12 | fake_name = 'foo'
13 | fake_bucket_root = 'default'
14 | fake_bucket_type = 'host'
15 | fake_device_class = 'ssd'
16 | fake_profile = 'default'
17 | fake_user = 'client.admin'
18 | fake_keyring = '/etc/ceph/{0}.{1}.keyring'.format(fake_cluster, fake_user)
19 | 
20 | 
21 | class TestCephCrushRuleModule(object):
22 | 
23 |     @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
24 |     def test_without_parameters(self, m_fail_json):
25 |         cephadm_test_common.set_module_args({})
26 |         m_fail_json.side_effect = cephadm_test_common.fail_json
27 | 
28 |         with pytest.raises(cephadm_test_common.AnsibleFailJson) as result:
29 |             cephadm_crush_rule.main()
30 | 
31 |         result = result.value.args[0]
32 |         assert result['msg'] == 'missing required arguments: name'
33 | 
34 |     @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
35 |     def test_with_name_only(self, m_fail_json):
36 |         cephadm_test_common.set_module_args({
37 |             'name': fake_name
38 |         })
39 |         m_fail_json.side_effect = cephadm_test_common.fail_json
40 | 
41 |         with pytest.raises(cephadm_test_common.AnsibleFailJson) as result:
42 |             cephadm_crush_rule.main()
43 | 
44 |         result = result.value.args[0]
45 |         assert result['msg'] == 'state is present but all of the following are missing: rule_type'
46 | 
47 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
48 |     def test_with_check_mode(self, m_exit_json):
49 |         cephadm_test_common.set_module_args({
50 |             'name': fake_name,
51 |             'rule_type': 'replicated',
52 |             'bucket_root': fake_bucket_root,
53 |             'bucket_type': fake_bucket_type,
54 |             '_ansible_check_mode': True
55 |         })
56 |         m_exit_json.side_effect = cephadm_test_common.exit_json
57 | 
58 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
59 |             cephadm_crush_rule.main()
60 | 
61 |         result = result.value.args[0]
62 |         assert not result['changed']
63 |         assert result['rc'] == 0
64 |         assert not result['stdout']
65 |         assert not result['stderr']
66 | 
67 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
68 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
69 |     def test_create_non_existing_replicated_rule(self, m_run_command, m_exit_json):
70 |         cephadm_test_common.set_module_args({
71 |             'name': fake_name,
72 |             'rule_type': 'replicated',
73 |             'bucket_root': fake_bucket_root,
74 |             'bucket_type': fake_bucket_type
75 |         })
76 |         m_exit_json.side_effect = cephadm_test_common.exit_json
77 |         get_rc = 2
78 |         get_stderr = 'Error ENOENT: unknown crush rule \'{0}\''.format(fake_name)
79 |         get_stdout = ''
80 |         create_rc = 0
81 |         create_stderr = ''
82 |         create_stdout = ''
83 |         m_run_command.side_effect = [
84 |             (get_rc, get_stdout, get_stderr),
85 |             (create_rc, create_stdout, create_stderr)
86 |         ]
87 | 
88 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
89 |             cephadm_crush_rule.main()
90 | 
91 |         result = result.value.args[0]
92 |         assert result['changed']
93 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
94 |                                  'osd', 'crush', 'rule',
95 |                                  'create-replicated', fake_name, fake_bucket_root, fake_bucket_type]
96 |         assert result['rc'] == create_rc
97 |         assert result['stderr'] == create_stderr
98 |         assert result['stdout'] == create_stdout
99 | 
100 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
101 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
102 |     def test_create_existing_replicated_rule(self, m_run_command, m_exit_json):
103 |         cephadm_test_common.set_module_args({
104 |             'name': fake_name,
105 |             'rule_type': 'replicated',
106 |             'bucket_root': fake_bucket_root,
107 |             'bucket_type': fake_bucket_type
108 |         })
109 |         m_exit_json.side_effect = cephadm_test_common.exit_json
110 |         rc = 0
111 |         stderr = ''
112 |         stdout = '{{"rule_name":"{0}","type":1,"steps":[{{"item_name":"{1}"}},{{"type":"{2}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
113 |         m_run_command.return_value = rc, stdout, stderr
114 | 
115 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
116 |             cephadm_crush_rule.main()
117 | 
118 |         result = result.value.args[0]
119 |         assert not result['changed']
120 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
121 |                                  'osd', 'crush', 'rule', 'dump', fake_name, '--format=json']
122 |         assert result['rc'] == 0
123 |         assert result['stderr'] == stderr
124 |         assert result['stdout'] == stdout
125 | 
126 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
127 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
128 |     def test_create_non_existing_replicated_rule_device_class(self, m_run_command, m_exit_json):
129 |         cephadm_test_common.set_module_args({
130 |             'name': fake_name,
131 |             'rule_type': 'replicated',
132 |             'bucket_root': fake_bucket_root,
133 |             'bucket_type': fake_bucket_type,
134 |             'device_class': fake_device_class
135 |         })
136 |         m_exit_json.side_effect = cephadm_test_common.exit_json
137 |         get_rc = 2
138 |         get_stderr = 'Error ENOENT: unknown crush rule \'{0}\''.format(fake_name)
139 |         get_stdout = ''
140 |         create_rc = 0
141 |         create_stderr = ''
142 |         create_stdout = ''
143 |         m_run_command.side_effect = [
144 |             (get_rc, get_stdout, get_stderr),
145 |             (create_rc, create_stdout, create_stderr)
146 |         ]
147 | 
148 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
149 |             cephadm_crush_rule.main()
150 | 
151 |         result = result.value.args[0]
152 |         assert result['changed']
153 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
154 |                                  'osd', 'crush', 'rule',
155 |                                  'create-replicated', fake_name, fake_bucket_root, fake_bucket_type, fake_device_class]
156 |         assert result['rc'] == create_rc
157 |         assert result['stderr'] == create_stderr
158 |         assert result['stdout'] == create_stdout
159 | 
160 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
161 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
162 |     def test_create_existing_replicated_rule_device_class(self, m_run_command, m_exit_json):
163 |         cephadm_test_common.set_module_args({
164 |             'name': fake_name,
165 |             'rule_type': 'replicated',
166 |             'bucket_root': fake_bucket_root,
167 |             'bucket_type': fake_bucket_type,
168 |             'device_class': fake_device_class
169 |         })
170 |         m_exit_json.side_effect = cephadm_test_common.exit_json
171 |         rc = 0
172 |         stderr = ''
173 |         stdout = '{{"rule_name":"{0}","type":1,"steps":[{{"item_name":"{1}"}},{{"type":"{2}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
174 |         m_run_command.return_value = rc, stdout, stderr
175 | 
176 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
177 |             cephadm_crush_rule.main()
178 | 
179 |         result = result.value.args[0]
180 |         assert not result['changed']
181 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
182 |                                  'osd', 'crush', 'rule', 'dump', fake_name, '--format=json']
183 |         assert result['rc'] == 0
184 |         assert result['stderr'] == stderr
185 |         assert result['stdout'] == stdout
186 | 
187 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
188 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
189 |     def test_create_non_existing_erasure_rule(self, m_run_command, m_exit_json):
190 |         cephadm_test_common.set_module_args({
191 |             'name': fake_name,
192 |             'rule_type': 'erasure',
193 |             'profile': fake_profile
194 |         })
195 |         m_exit_json.side_effect = cephadm_test_common.exit_json
196 |         get_rc = 2
197 |         get_stderr = 'Error ENOENT: unknown crush rule \'{0}\''.format(fake_name)
198 |         get_stdout = ''
199 |         create_rc = 0
200 |         create_stderr = ''
201 |         create_stdout = 'created rule {0} at 1'.format(fake_name)
202 |         m_run_command.side_effect = [
203 |             (get_rc, get_stdout, get_stderr),
204 |             (create_rc, create_stdout, create_stderr)
205 |         ]
206 | 
207 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
208 |             cephadm_crush_rule.main()
209 | 
210 |         result = result.value.args[0]
211 |         assert result['changed']
212 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
213 |                                  'osd', 'crush', 'rule', 'create-erasure', fake_name, fake_profile]
214 |         assert result['rc'] == create_rc
215 |         assert result['stderr'] == create_stderr
216 |         assert result['stdout'] == create_stdout
217 | 
218 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
219 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
220 |     def test_create_existing_erasure_rule(self, m_run_command, m_exit_json):
221 |         cephadm_test_common.set_module_args({
222 |             'name': fake_name,
223 |             'rule_type': 'erasure',
224 |             'profile': fake_profile
225 |         })
226 |         m_exit_json.side_effect = cephadm_test_common.exit_json
227 |         rc = 0
228 |         stderr = ''
229 |         stdout = '{{"type":3,"rule_name":"{0}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
230 |         m_run_command.return_value = rc, stdout, stderr
231 | 
232 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
233 |             cephadm_crush_rule.main()
234 | 
235 |         result = result.value.args[0]
236 |         assert not result['changed']
237 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
238 |                                  'osd', 'crush', 'rule', 'dump', fake_name, '--format=json']
239 |         assert result['rc'] == 0
240 |         assert result['stderr'] == stderr
241 |         assert result['stdout'] == stdout
242 | 
243 |     @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
244 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
245 |     def test_update_existing_replicated_rule(self, m_run_command, m_fail_json):
246 |         cephadm_test_common.set_module_args({
247 |             'name': fake_name,
248 |             'rule_type': 'replicated',
249 |             'bucket_root': fake_bucket_root,
250 |             'bucket_type': fake_bucket_type,
251 |             'device_class': fake_device_class
252 |         })
253 |         m_fail_json.side_effect = cephadm_test_common.fail_json
254 |         rc = 0
255 |         stderr = ''
256 |         stdout = '{{"type":3,"rule_name":"{0}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
257 |         m_run_command.return_value = rc, stdout, stderr
258 | 
259 |         with pytest.raises(cephadm_test_common.AnsibleFailJson) as result:
260 |             cephadm_crush_rule.main()
261 | 
262 |         result = result.value.args[0]
263 |         assert not result['changed']
264 |         assert result['msg'] == 'Can not convert crush rule {0} to replicated'.format(fake_name)
265 |         assert result['rc'] == 1
266 | 
267 |     @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
268 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
269 |     def test_update_existing_erasure_rule(self, m_run_command, m_fail_json):
270 |         cephadm_test_common.set_module_args({
271 |             'name': fake_name,
272 |             'rule_type': 'erasure',
273 |             'profile': fake_profile
274 |         })
275 |         m_fail_json.side_effect = cephadm_test_common.fail_json
276 |         rc = 0
277 |         stderr = ''
278 |         stdout = '{{"type":1,"rule_name":"{0}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
279 |         m_run_command.return_value = rc, stdout, stderr
280 | 
281 |         with pytest.raises(cephadm_test_common.AnsibleFailJson) as result:
282 |             cephadm_crush_rule.main()
283 | 
284 |         result = result.value.args[0]
285 |         assert not result['changed']
286 |         assert result['msg'] == 'Can not convert crush rule {0} to erasure'.format(fake_name)
287 |         assert result['rc'] == 1
288 | 
289 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
290 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
291 |     def test_remove_non_existing_rule(self, m_run_command, m_exit_json):
292 |         cephadm_test_common.set_module_args({
293 |             'name': fake_name,
294 |             'state': 'absent'
295 |         })
296 |         m_exit_json.side_effect = cephadm_test_common.exit_json
297 |         rc = 2
298 |         stderr = 'Error ENOENT: unknown crush rule \'{0}\''.format(fake_name)
299 |         stdout = ''
300 |         m_run_command.return_value = rc, stdout, stderr
301 | 
302 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
303 |             cephadm_crush_rule.main()
304 | 
305 |         result = result.value.args[0]
306 |         assert not result['changed']
307 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
308 |                                  'osd', 'crush', 'rule', 'dump', fake_name, '--format=json']
309 |         assert result['rc'] == 0
310 |         assert result['stderr'] == stderr
311 |         assert result['stdout'] == "Crush Rule {0} doesn't exist".format(fake_name)
312 | 
313 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
314 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
315 |     def test_remove_existing_rule(self, m_run_command, m_exit_json):
316 |         cephadm_test_common.set_module_args({
317 |             'name': fake_name,
318 |             'state': 'absent'
319 |         })
320 |         m_exit_json.side_effect = cephadm_test_common.exit_json
321 |         get_rc = 0
322 |         get_stderr = ''
323 |         get_stdout = '{{"rule_name":"{0}","steps":[{{"item_name":"{1}"}},{{"type":"{2}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
324 |         remove_rc = 0
325 |         remove_stderr = ''
326 |         remove_stdout = ''
327 |         m_run_command.side_effect = [
328 |             (get_rc, get_stdout, get_stderr),
329 |             (remove_rc, remove_stdout, remove_stderr)
330 |         ]
331 | 
332 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
333 |             cephadm_crush_rule.main()
334 | 
335 |         result = result.value.args[0]
336 |         assert result['changed']
337 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
338 |                                  'osd', 'crush', 'rule', 'rm', fake_name]
339 |         assert result['rc'] == remove_rc
340 |         assert result['stderr'] == remove_stderr
341 |         assert result['stdout'] == remove_stdout
342 | 
343 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
344 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
345 |     def test_get_non_existing_rule(self, m_run_command, m_exit_json):
346 |         cephadm_test_common.set_module_args({
347 |             'name': fake_name,
348 |             'state': 'info'
349 |         })
350 |         m_exit_json.side_effect = cephadm_test_common.exit_json
351 |         rc = 2
352 |         stderr = 'Error ENOENT: unknown crush rule \'{0}\''.format(fake_name)
353 |         stdout = ''
354 |         m_run_command.return_value = rc, stdout, stderr
355 | 
356 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
357 |             cephadm_crush_rule.main()
358 | 
359 |         result = result.value.args[0]
360 |         assert not result['changed']
361 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
362 |                                  'osd', 'crush', 'rule', 'dump', fake_name, '--format=json']
363 | 
364 |         assert result['rc'] == rc
365 |         assert result['stderr'] == stderr
366 |         assert result['stdout'] == stdout
367 | 
368 |     @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
369 |     @patch('ansible.module_utils.basic.AnsibleModule.run_command')
370 |     def test_get_existing_rule(self, m_run_command, m_exit_json):
371 |         cephadm_test_common.set_module_args({
372 |             'name': fake_name,
373 |             'state': 'info'
374 |         })
375 |         m_exit_json.side_effect = cephadm_test_common.exit_json
376 |         rc = 0
377 |         stderr = ''
378 |         stdout = '{{"rule_name":"{0}","steps":[{{"item_name":"{1}"}},{{"type":"{2}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
379 |         m_run_command.return_value = rc, stdout, stderr
380 | 
381 |         with pytest.raises(cephadm_test_common.AnsibleExitJson) as result:
382 |             cephadm_crush_rule.main()
383 | 
384 |         result = result.value.args[0]
385 |         assert not result['changed']
386 |         assert result['cmd'] == ['cephadm', '--timeout', '60', 'shell', '--', 'ceph',
387 |                                  'osd', 'crush', 'rule', 'dump', fake_name, '--format=json']
388 | 
389 |         assert result['rc'] == rc
390 |         assert result['stderr'] == stderr
391 |         assert result['stdout'] == stdout
392 | 
--------------------------------------------------------------------------------