├── .github ├── .jira_sync_config.yaml ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── automatic-doc-checks.yml │ ├── commits.yml │ ├── q2q-candidate-upgrade.yml │ ├── q2r-candidate-upgrade.yaml │ ├── r2r-candidate-upgrade.yaml │ ├── r2s-edge-upgrade.yaml │ ├── s2s-candidate-upgrade.yaml │ └── tests.yml ├── .gitignore ├── .readthedocs.yaml ├── CONTRIBUTING.md ├── COPYING ├── HACKING.md ├── README.md ├── SECURITY.md ├── assets ├── add_osd.png ├── bootstrap.png └── enable_rgw.png ├── dependabot.yml ├── docs ├── .custom_wordlist.txt ├── .gitignore ├── .readthedocs.yaml ├── .sphinx │ ├── _static │ │ ├── custom.css │ │ ├── favicon.png │ │ ├── furo_colors.css │ │ ├── github_issue_links.css │ │ ├── github_issue_links.js │ │ ├── header-nav.js │ │ ├── header.css │ │ └── tag.png │ ├── _templates │ │ ├── base.html │ │ ├── footer.html │ │ ├── header.html │ │ └── page.html │ ├── requirements.txt │ └── spellingcheck.yaml ├── .wokeignore ├── .wordlist.txt ├── Makefile ├── conf.py ├── contributing │ └── index.rst ├── custom_conf.py ├── explanation │ ├── assets │ │ └── flow.jpg │ ├── cluster-configurations.rst │ ├── cluster-maintenance.rst │ ├── cluster-scaling.rst │ ├── index.rst │ ├── microceph-charm.rst │ ├── security │ │ ├── cryptographic-approaches.rst │ │ ├── full-disk-encryption.rst │ │ └── security-overview.rst │ ├── snap-content-interface.rst │ └── taking-snapshots.rst ├── how-to │ ├── assets │ │ ├── alerts │ │ ├── prometheus_alerts.yaml │ │ ├── prometheus_console.jpg │ │ └── prometheus_microceph_scraping.jpg │ ├── change-log-level.rst │ ├── configure-network-keys.rst │ ├── configure-rbd-mirroring.rst │ ├── enable-alerts.rst │ ├── enable-metrics.rst │ ├── enable-service-instances.rst │ ├── import-remote-cluster.rst │ ├── index.rst │ ├── integrate-keystone.rst │ ├── major-upgrade.rst │ ├── migrate-auto-services.rst │ ├── mount-block-device.rst │ ├── mount-cephfs-share.rst │ ├── multi-node.rst │ ├── perform-cluster-maintenance.rst │ ├── 
perform-site-failover.rst │ ├── rbd-client-cfg.rst │ ├── remove-disk.rst │ └── single-node.rst ├── index.rst ├── make.bat ├── openapi │ └── openapi.yaml ├── reference │ ├── commands │ │ ├── .cmd-template │ │ ├── client.rst │ │ ├── cluster.rst │ │ ├── disable.rst │ │ ├── disk.rst │ │ ├── enable.rst │ │ ├── help.rst │ │ ├── index.rst │ │ ├── init.rst │ │ ├── pool.rst │ │ ├── remote.rst │ │ ├── replication-rbd.rst │ │ └── status.rst │ ├── index.rst │ └── release-notes.rst ├── reuse │ └── links.txt └── tutorial │ └── get-started.rst ├── microceph ├── Makefile ├── api │ ├── client_configs.go │ ├── cluster.go │ ├── configs.go │ ├── disks.go │ ├── microceph_configs.go │ ├── ops_maintenance.go │ ├── ops_replication.go │ ├── pool.go │ ├── remote.go │ ├── resources.go │ ├── servers.go │ ├── services.go │ └── types │ │ ├── client_configs.go │ │ ├── configs.go │ │ ├── disks.go │ │ ├── endpoint_prefix.go │ │ ├── log.go │ │ ├── maintenance.go │ │ ├── pool.go │ │ ├── remote.go │ │ ├── replication.go │ │ ├── replication_rbd.go │ │ └── services.go ├── ceph │ ├── bootstrap.go │ ├── bootstrap_test.go │ ├── ceph_rbd_mirror.go │ ├── client_config.go │ ├── client_config_test.go │ ├── config.go │ ├── config_test.go │ ├── configwriter.go │ ├── configwriter_test.go │ ├── crush.go │ ├── join.go │ ├── keyring.go │ ├── keyring_test.go │ ├── log.go │ ├── maintenance.go │ ├── manager.go │ ├── metadata.go │ ├── monitor.go │ ├── operations.go │ ├── operations_test.go │ ├── osd.go │ ├── osd_test.go │ ├── pre_remove_test.go │ ├── rbd_mirror.go │ ├── rbd_mirror_test.go │ ├── remove.go │ ├── replication.go │ ├── replication_rbd.go │ ├── rgw.go │ ├── rgw_test.go │ ├── run.go │ ├── service_placement_client.go │ ├── service_placement_mon.go │ ├── services.go │ ├── services_placement.go │ ├── services_placement_generic.go │ ├── services_placement_rgw.go │ ├── services_placement_test.go │ ├── services_test.go │ ├── snap.go │ ├── start.go │ ├── start_test.go │ ├── subprocess.go │ └── test_assets │ │ ├── 
rbd_mirror_image_status.json │ │ ├── rbd_mirror_pool_info.json │ │ ├── rbd_mirror_pool_status.json │ │ ├── rbd_mirror_promote_secondary_failure.txt │ │ └── rbd_mirror_verbose_pool_status.json ├── client │ ├── client_configs.go │ ├── cluster.go │ ├── configs.go │ ├── disks.go │ ├── log.go │ ├── maintenance.go │ ├── pool.go │ ├── remote.go │ ├── replication.go │ ├── services.go │ └── wrap.go ├── cmd │ ├── microceph │ │ ├── client.go │ │ ├── client_config.go │ │ ├── client_config_get.go │ │ ├── client_config_list.go │ │ ├── client_config_reset.go │ │ ├── client_config_set.go │ │ ├── cluster.go │ │ ├── cluster_add.go │ │ ├── cluster_bootstrap.go │ │ ├── cluster_config.go │ │ ├── cluster_config_get.go │ │ ├── cluster_config_list.go │ │ ├── cluster_config_reset.go │ │ ├── cluster_config_set.go │ │ ├── cluster_export.go │ │ ├── cluster_join.go │ │ ├── cluster_list.go │ │ ├── cluster_maintenance.go │ │ ├── cluster_maintenance_enter.go │ │ ├── cluster_maintenance_exit.go │ │ ├── cluster_migrate.go │ │ ├── cluster_remove.go │ │ ├── cluster_sql.go │ │ ├── disable.go │ │ ├── disable_rgw.go │ │ ├── disk.go │ │ ├── disk_add.go │ │ ├── disk_list.go │ │ ├── disk_remove.go │ │ ├── enable.go │ │ ├── enable_mds.go │ │ ├── enable_mgr.go │ │ ├── enable_mon.go │ │ ├── enable_rbd_mirror.go │ │ ├── enable_rgw.go │ │ ├── init.go │ │ ├── log.go │ │ ├── main.go │ │ ├── pool.go │ │ ├── remote.go │ │ ├── remote_import.go │ │ ├── remote_list.go │ │ ├── remote_remove.go │ │ ├── replication.go │ │ ├── replication_configure.go │ │ ├── replication_demote.go │ │ ├── replication_disable.go │ │ ├── replication_enable.go │ │ ├── replication_list.go │ │ ├── replication_promote.go │ │ ├── replication_status.go │ │ └── status.go │ └── microcephd │ │ └── main.go ├── common │ ├── bootstrap.go │ ├── cluster.go │ ├── fileutils.go │ ├── network.go │ ├── set.go │ ├── storage.go │ └── storage_test.go ├── constants │ └── constants.go ├── database │ ├── client_config.go │ ├── client_config.mapper.go │ ├── 
client_config_extras.go │ ├── config.go │ ├── config.mapper.go │ ├── disk.go │ ├── disk.mapper.go │ ├── disk_extras.go │ ├── remote.go │ ├── remote.mapper.go │ ├── remote_extras.go │ ├── schema.go │ ├── service.go │ └── service.mapper.go ├── go.mod ├── go.sum ├── interfaces │ └── state.go ├── mocks │ ├── ClientConfigQueryIntf.go │ ├── ClientInterface.go │ ├── ConfigWriter.go │ ├── MemberCounterInterface.go │ ├── MicroclusterState.go │ ├── NetworkIntf.go │ ├── OSDQueryInterface.go │ ├── PlacementIntf.go │ ├── Runner.go │ └── StateInterface.go ├── tests │ ├── testdata │ │ ├── ceph.client.admin.keyring │ │ └── ceph.conf │ └── testutils.go └── version │ └── version.go ├── snap ├── hooks │ ├── install │ └── post-refresh └── snapcraft.yaml ├── snapcraft └── commands │ ├── ceph │ ├── common │ ├── daemon.start │ ├── mds.start │ ├── mgr.start │ ├── microceph │ ├── mon.start │ ├── osd.reload │ ├── osd.start │ ├── rados │ ├── radosgw-admin │ ├── rbd │ ├── rbd-mirror.start │ └── rgw.start ├── tests ├── hurl │ ├── maintenance-put-failed.hurl │ └── services-mon.hurl └── scripts │ └── actionutils.sh └── tox.ini /.github/.jira_sync_config.yaml: -------------------------------------------------------------------------------- 1 | settings: 2 | # Jira project key to create the issue in 3 | jira_project_key: "CEPH" 4 | 5 | # Dictionary mapping GitHub issue status to Jira issue status 6 | status_mapping: 7 | opened: Untriaged 8 | closed: Done 9 | 10 | # (Optional) Jira project components that should be attached to the created issue 11 | # Component names are case-sensitive 12 | components: 13 | - MicroCeph 14 | 15 | # (Optional) (Default: false) Add a new comment in GitHub with a link to Jira created issue 16 | add_gh_comment: true 17 | 18 | # (Optional) (Default: None) Parent Epic key to link the issue to 19 | epic_key: CEPH-972 20 | 21 | # (Optional) Dictionary mapping GitHub issue labels to Jira issue types. 
22 | # If label on the issue is not in specified list, this issue will be created as a Bug 23 | label_mapping: 24 | enhancement: Story 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Issue report 2 | 3 | ## What version of MicroCeph are you using ? 4 | 5 | Use this section to describe the channel/revision which produces the unexpected behaviour. 6 | This information can be fetched from the `installed:` section of `sudo snap info microceph` output. 7 | 8 | ## What are the steps to reproduce this issue ? 9 | 10 | 1. … 11 | 2. … 12 | 3. … 13 | 14 | ## What happens (observed behaviour) ? 15 | 16 | … 17 | 18 | ## What were you expecting to happen ? 19 | 20 | … 21 | 22 | ## Relevant logs, error output, etc. 23 | 24 | If it’s considerably long, please paste to https://gist.github.com/ and insert the link here. 25 | 26 | ## Additional comments. 27 | 28 | … 29 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | Please include a summary of the changes and the related issue. Also include relevant 4 | motivation and context. List any dependencies that are required for this change. 5 | 6 | If it fixes a reported issue, mention it here, i.e.: 7 | Fixes # 8 | 9 | ## Type of change 10 | 11 | Delete options that are not relevant. 12 | 13 | - Bug fix (non-breaking change which fixes an issue) 14 | - New feature (non-breaking change which adds functionality) 15 | - Breaking change (fix or feature that would cause existing functionality to not work as expected) 16 | - Clean code (code refactor, test updates; does not introduce functional changes) 17 | - Documentation update (change to documentation only) 18 | 19 | ## How has this been tested? 
20 | 21 | > [!NOTE] 22 | > All functional changes should accompany corresponding tests (unit tests, functional tests, etc.). 23 | 24 | Please describe the addition/modification of tests done to verify this change. Also list any 25 | relevant details for your test configuration. 26 | 27 | ## Contributor checklist 28 | 29 | Please check that you have: 30 | 31 | - [ ] self-reviewed the code in this PR 32 | - [ ] added code comments, particularly in less straightforward areas 33 | - [ ] checked and added or updated relevant documentation 34 | - [ ] checked and added or updated relevant release notes 35 | - [ ] added tests to verify effectiveness of this change -------------------------------------------------------------------------------- /.github/workflows/automatic-doc-checks.yml: -------------------------------------------------------------------------------- 1 | name: Main Documentation Checks 2 | 3 | on: 4 | - push 5 | - pull_request 6 | - workflow_dispatch 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.ref }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | documentation-checks: 14 | uses: canonical/documentation-workflows/.github/workflows/documentation-checks.yaml@main 15 | with: 16 | working-directory: 'docs' 17 | 18 | openapi-check: 19 | name: OpenAPI Spec Check 20 | runs-on: ubuntu-22.04 21 | steps: 22 | - name: Checkout code 23 | uses: actions/checkout@v3 24 | with: 25 | fetch-depth: 0 26 | 27 | - name: Install dependencies 28 | run: pip install openapi-spec-validator 29 | 30 | - name: Check spec 31 | run: | 32 | set -eux 33 | openapi-spec-validator ./docs/openapi/openapi.yaml -------------------------------------------------------------------------------- /.github/workflows/commits.yml: -------------------------------------------------------------------------------- 1 | name: Commits 2 | on: 3 | - pull_request 4 | 5 | permissions: 6 | contents: read 7 | 8 | jobs: 9 | cla-check: 10 | permissions: 11 | pull-requests: read 12 | name: 
Canonical CLA signed 13 | runs-on: ubuntu-24.04 14 | steps: 15 | - name: Check if CLA signed 16 | uses: canonical/has-signed-canonical-cla@v2 17 | 18 | dco-check: 19 | permissions: 20 | pull-requests: read # for tim-actions/get-pr-commits to get list of commits from the PR 21 | name: Signed-off-by (DCO) 22 | runs-on: ubuntu-24.04 23 | steps: 24 | - name: Get PR Commits 25 | id: 'get-pr-commits' 26 | uses: tim-actions/get-pr-commits@198af03565609bb4ed924d1260247b4881f09e7d # v1.3.1 27 | with: 28 | token: ${{ secrets.GITHUB_TOKEN }} 29 | 30 | - name: Check that all commits are signed-off 31 | uses: tim-actions/dco@f2279e6e62d5a7d9115b0cb8e837b777b1b02e21 # v1.1.0 32 | with: 33 | commits: ${{ steps.get-pr-commits.outputs.commits }} 34 | -------------------------------------------------------------------------------- /.github/workflows/q2q-candidate-upgrade.yml: -------------------------------------------------------------------------------- 1 | name: Upgrade a q/stable cluster to q/candidate 2 | on: 3 | # Allows you to run this workflow manually from the Actions tab 4 | workflow_dispatch: null 5 | 6 | jobs: 7 | # a2b upgrade implies a/stable -> b/candidate release upgrade. 
8 | q2q-upgrade-test: 9 | name: Test quincy/stable to quincy/candidate upgrades 10 | runs-on: ubuntu-22.04 11 | steps: 12 | 13 | - name: Checkout code 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Copy utils 19 | run: cp tests/scripts/actionutils.sh $HOME 20 | 21 | - name: Clear FORWARD firewall rules 22 | run: ~/actionutils.sh cleaript 23 | 24 | - name: Free disk 25 | run: ~/actionutils.sh free_runner_disk 26 | 27 | - name: Install dependencies 28 | run: ~/actionutils.sh setup_lxd 29 | 30 | - name: Create containers with loopback devices 31 | run: ~/actionutils.sh create_containers public 32 | 33 | - name: Install quincy stable from store 34 | run: ~/actionutils.sh install_store quincy/stable 35 | 36 | - name: Bootstrap 37 | run: ~/actionutils.sh bootstrap_head 38 | 39 | - name: Setup cluster 40 | run: ~/actionutils.sh cluster_nodes 41 | 42 | - name: Add 3 OSDs 43 | run: | 44 | for c in node-wrk0 node-wrk1 node-wrk2 ; do 45 | ~/actionutils.sh add_osd_to_node $c 46 | done 47 | ~/actionutils.sh headexec wait_for_osds 3 48 | 49 | - name: Enable RGW 50 | run: ~/actionutils.sh headexec enable_rgw 51 | 52 | - name: Exercise RGW 53 | run: ~/actionutils.sh headexec testrgw 54 | 55 | - name: Upgrade to candidate 56 | run: ~/actionutils.sh refresh_snap quincy/candidate 57 | 58 | - name: Wait until 3 OSDs are up 59 | run: ~/actionutils.sh headexec wait_for_osds 3 60 | 61 | - name: Verify config 62 | run: ~/actionutils.sh test_ceph_conf 63 | 64 | - name: Exercise RGW again 65 | run: ~/actionutils.sh headexec testrgw 66 | -------------------------------------------------------------------------------- /.github/workflows/q2r-candidate-upgrade.yaml: -------------------------------------------------------------------------------- 1 | name: Upgrade a q/stable cluster to r/candidate 2 | on: 3 | # Allows you to run this workflow manually from the Actions tab 4 | workflow_dispatch: null 5 | 6 | jobs: 7 | # a2b upgrade implies a/stable -> b/candidate 
release upgrade. 8 | q2r-upgrade-test: 9 | name: Test quincy/stable to reef/candidate upgrades 10 | runs-on: ubuntu-22.04 11 | steps: 12 | 13 | - name: Checkout code 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Copy utils 19 | run: cp tests/scripts/actionutils.sh $HOME 20 | 21 | - name: Clear FORWARD firewall rules 22 | run: ~/actionutils.sh cleaript 23 | 24 | - name: Free disk 25 | run: ~/actionutils.sh free_runner_disk 26 | 27 | - name: Install dependencies 28 | run: ~/actionutils.sh setup_lxd 29 | 30 | - name: Create containers with loopback devices 31 | run: ~/actionutils.sh create_containers public 32 | 33 | - name: Install quincy stable from store 34 | run: ~/actionutils.sh install_store quincy/stable 35 | 36 | - name: Bootstrap 37 | run: ~/actionutils.sh bootstrap_head 38 | 39 | - name: Setup cluster 40 | run: ~/actionutils.sh cluster_nodes 41 | 42 | - name: Add 3 OSDs 43 | run: | 44 | for c in node-wrk0 node-wrk1 node-wrk2 ; do 45 | ~/actionutils.sh add_osd_to_node $c 46 | done 47 | ~/actionutils.sh headexec wait_for_osds 3 48 | 49 | - name: Enable RGW 50 | run: ~/actionutils.sh headexec enable_rgw 51 | 52 | - name: Exercise RGW 53 | run: ~/actionutils.sh headexec testrgw 54 | 55 | - name: Upgrade to candidate 56 | run: ~/actionutils.sh refresh_snap reef/candidate 57 | 58 | - name: Wait until 3 OSDs are up 59 | run: ~/actionutils.sh headexec wait_for_osds 3 60 | 61 | - name: Verify config 62 | run: ~/actionutils.sh test_ceph_conf 63 | 64 | - name: Exercise RGW again 65 | run: ~/actionutils.sh headexec testrgw 66 | -------------------------------------------------------------------------------- /.github/workflows/r2r-candidate-upgrade.yaml: -------------------------------------------------------------------------------- 1 | name: Upgrade a r/stable cluster to r/candidate 2 | on: 3 | # Allows you to run this workflow manually from the Actions tab 4 | workflow_dispatch: null 5 | 6 | jobs: 7 | # a2b upgrade implies a/stable -> 
b/candidate release upgrade. 8 | r2r-upgrade-test: 9 | name: Test reef/stable to reef/candidate upgrades 10 | runs-on: ubuntu-22.04 11 | steps: 12 | 13 | - name: Checkout code 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Copy utils 19 | run: cp tests/scripts/actionutils.sh $HOME 20 | 21 | - name: Clear FORWARD firewall rules 22 | run: ~/actionutils.sh cleaript 23 | 24 | - name: Free disk 25 | run: ~/actionutils.sh free_runner_disk 26 | 27 | - name: Install dependencies 28 | run: ~/actionutils.sh setup_lxd 29 | 30 | - name: Create containers with loopback devices 31 | run: ~/actionutils.sh create_containers public 32 | 33 | - name: Install reef stable from store 34 | run: ~/actionutils.sh install_store reef/stable 35 | 36 | - name: Bootstrap 37 | run: ~/actionutils.sh bootstrap_head 38 | 39 | - name: Setup cluster 40 | run: ~/actionutils.sh cluster_nodes 41 | 42 | - name: Add 3 OSDs 43 | run: | 44 | for c in node-wrk0 node-wrk1 node-wrk2 ; do 45 | ~/actionutils.sh add_osd_to_node $c 46 | done 47 | ~/actionutils.sh headexec wait_for_osds 3 48 | 49 | - name: Enable RGW 50 | run: ~/actionutils.sh headexec enable_rgw 51 | 52 | - name: Exercise RGW 53 | run: ~/actionutils.sh headexec testrgw 54 | 55 | - name: Upgrade to candidate 56 | run: ~/actionutils.sh refresh_snap reef/candidate 57 | 58 | - name: Wait until 3 OSDs are up 59 | run: ~/actionutils.sh headexec wait_for_osds 3 60 | 61 | - name: Verify config 62 | run: ~/actionutils.sh test_ceph_conf 63 | 64 | - name: Exercise RGW again 65 | run: ~/actionutils.sh headexec testrgw 66 | -------------------------------------------------------------------------------- /.github/workflows/r2s-edge-upgrade.yaml: -------------------------------------------------------------------------------- 1 | name: Upgrade a r/stable cluster to squid/candidate 2 | on: 3 | # Allows you to run this workflow manually from the Actions tab 4 | workflow_dispatch: null 5 | 6 | jobs: 7 | r2s-upgrade-test: 8 | 
name: Test reef/stable to squid/candidate upgrades 9 | runs-on: ubuntu-22.04 10 | steps: 11 | 12 | - name: Checkout code 13 | uses: actions/checkout@v3 14 | with: 15 | fetch-depth: 0 16 | 17 | - name: Copy utils 18 | run: cp tests/scripts/actionutils.sh $HOME 19 | 20 | - name: Clear FORWARD firewall rules 21 | run: ~/actionutils.sh cleaript 22 | 23 | - name: Free disk 24 | run: ~/actionutils.sh free_runner_disk 25 | 26 | - name: Install dependencies 27 | run: ~/actionutils.sh setup_lxd 28 | 29 | - name: Create containers with loopback devices 30 | run: ~/actionutils.sh create_containers public 31 | 32 | - name: Install reef stable from store 33 | run: ~/actionutils.sh install_store reef/stable 34 | 35 | - name: Bootstrap 36 | run: ~/actionutils.sh bootstrap_head 37 | 38 | - name: Setup cluster 39 | run: ~/actionutils.sh cluster_nodes 40 | 41 | - name: Add 3 OSDs 42 | run: | 43 | for c in node-wrk0 node-wrk1 node-wrk2 ; do 44 | ~/actionutils.sh add_osd_to_node $c 45 | done 46 | ~/actionutils.sh headexec wait_for_osds 3 47 | 48 | - name: Enable RGW 49 | run: ~/actionutils.sh headexec enable_rgw 50 | 51 | - name: Exercise RGW 52 | run: ~/actionutils.sh headexec testrgw 53 | 54 | - name: Upgrade to candidate 55 | run: ~/actionutils.sh refresh_snap squid/candidate 56 | 57 | - name: Wait until 3 OSDs are up 58 | run: ~/actionutils.sh headexec wait_for_osds 3 59 | 60 | - name: Verify config 61 | run: ~/actionutils.sh test_ceph_conf 62 | 63 | - name: Verify health 64 | run: ~/actionutils.sh headexec verify_health 65 | 66 | - name: Exercise RGW again 67 | run: ~/actionutils.sh headexec testrgw 68 | -------------------------------------------------------------------------------- /.github/workflows/s2s-candidate-upgrade.yaml: -------------------------------------------------------------------------------- 1 | name: Upgrade a s/stable cluster to s/candidate 2 | on: 3 | # Allows you to run this workflow manually from the Actions tab 4 | workflow_dispatch: null 5 | 6 | jobs: 7 | 
# a2b upgrade implies a/stable -> b/candidate release upgrade. 8 | s2s-upgrade-test: 9 | name: Test squid/stable to squid/candidate upgrades 10 | runs-on: ubuntu-22.04 11 | steps: 12 | 13 | - name: Checkout code 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Copy utils 19 | run: cp tests/scripts/actionutils.sh $HOME 20 | 21 | - name: Clear FORWARD firewall rules 22 | run: ~/actionutils.sh cleaript 23 | 24 | - name: Free disk 25 | run: ~/actionutils.sh free_runner_disk 26 | 27 | - name: Install dependencies 28 | run: ~/actionutils.sh setup_lxd 29 | 30 | - name: Create containers with loopback devices 31 | run: ~/actionutils.sh create_containers public 32 | 33 | - name: Install squid stable from store 34 | run: ~/actionutils.sh install_store squid/stable 35 | 36 | - name: Bootstrap 37 | run: ~/actionutils.sh bootstrap_head 38 | 39 | - name: Setup cluster 40 | run: ~/actionutils.sh cluster_nodes 41 | 42 | - name: Add 3 OSDs 43 | run: | 44 | for c in node-wrk0 node-wrk1 node-wrk2 ; do 45 | ~/actionutils.sh add_osd_to_node $c 46 | done 47 | ~/actionutils.sh headexec wait_for_osds 3 48 | 49 | - name: Enable RGW 50 | run: ~/actionutils.sh headexec enable_rgw 51 | 52 | - name: Exercise RGW 53 | run: ~/actionutils.sh headexec testrgw 54 | 55 | - name: Upgrade to candidate 56 | run: ~/actionutils.sh refresh_snap squid/candidate 57 | 58 | - name: Wait until 3 OSDs are up 59 | run: ~/actionutils.sh headexec wait_for_osds 3 60 | 61 | - name: Verify config 62 | run: ~/actionutils.sh test_ceph_conf 63 | 64 | - name: Exercise RGW again 65 | run: ~/actionutils.sh headexec testrgw 66 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.snap 2 | _build 3 | *.swp 4 | .idea 5 | .vscode 6 | microceph/coverage.out 7 | microceph/coverage.html 8 | -------------------------------------------------------------------------------- 
/.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | build: 3 | os: "ubuntu-22.04" 4 | tools: 5 | python: "3.11" 6 | jobs: 7 | post_checkout: 8 | - git fetch --unshallow || true 9 | 10 | sphinx: 11 | builder: dirhtml 12 | configuration: docs/conf.py 13 | fail_on_warning: true 14 | 15 | python: 16 | install: 17 | - requirements: docs/.sphinx/requirements.txt -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | To report a security issue, file a [Private Security 2 | Report](https://github.com/canonical/microceph/security/advisories) with 3 | a description of the issue, the steps you took to create the issue, affected 4 | versions, and, if known, mitigations for the issue. 5 | 6 | The [Ubuntu Security disclosure and embargo 7 | policy](https://ubuntu.com/security/disclosure-policy) contains more 8 | information about what you can expect when you contact us and what we expect 9 | from you. 
10 | -------------------------------------------------------------------------------- /assets/add_osd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/assets/add_osd.png -------------------------------------------------------------------------------- /assets/bootstrap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/assets/bootstrap.png -------------------------------------------------------------------------------- /assets/enable_rgw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/assets/enable_rgw.png -------------------------------------------------------------------------------- /dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: /microceph/ 5 | schedule: 6 | interval: daily -------------------------------------------------------------------------------- /docs/.custom_wordlist.txt: -------------------------------------------------------------------------------- 1 | openstack 2 | ceph 3 | sudo 4 | Howto 5 | configs 6 | osd 7 | config 8 | IPv 9 | cfg 10 | CFG 11 | LTS 12 | MicroCeph 13 | microceph 14 | OSDs 15 | MSD 16 | Ceph 17 | CephFs 18 | CephX 19 | Alertmanager 20 | MDS 21 | hostname 22 | mees 23 | loopback 24 | lsblk 25 | hostnames 26 | OSD 27 | keyring 28 | keyrings 29 | FDE 30 | snapd 31 | RGW 32 | MicroCeph's 33 | LUKS 34 | cryptsetup 35 | dm 36 | modinfo 37 | newFs 38 | subcommands 39 | backend 40 | backfilling 41 | overstrained 42 | Ceph's 43 | Scalable 44 | scalability 45 | CephHealthWarning 46 | deduplicating 47 | CIDR 48 | RADOS 49 | flipside 
50 | Pre 51 | mds 52 | mon 53 | rgw 54 | rbd 55 | RBD 56 | MgrReports 57 | scalable 58 | Mattermost 59 | integratable 60 | cfg 61 | conf 62 | qemu 63 | writethrough 64 | writeback 65 | IOPS 66 | noout 67 | Noout 68 | Unsetting 69 | cephfs 70 | fs 71 | filesystem 72 | filesystems 73 | sda 74 | ESM 75 | Livepatch 76 | 77 | vms 78 | resync 79 | failover 80 | Failover 81 | Failback 82 | 83 | MiB 84 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | /*env*/ 2 | .sphinx/venv/ 3 | .sphinx/warnings.txt 4 | .sphinx/.wordlist.dic 5 | .sphinx/.doctrees/ 6 | _build 7 | .DS_Store 8 | __pycache__ 9 | .idea/ 10 | -------------------------------------------------------------------------------- /docs/.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | 14 | # Build documentation in the docs/ directory with Sphinx 15 | sphinx: 16 | builder: dirhtml 17 | configuration: docs/conf.py 18 | fail_on_warning: true 19 | 20 | # If using Sphinx, optionally build your docs in additional formats such as PDF 21 | formats: 22 | - pdf 23 | 24 | # Optionally declare the Python requirements required to build your docs 25 | python: 26 | install: 27 | - requirements: docs/.sphinx/requirements.txt 28 | -------------------------------------------------------------------------------- /docs/.sphinx/_static/favicon.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/docs/.sphinx/_static/favicon.png -------------------------------------------------------------------------------- /docs/.sphinx/_static/github_issue_links.css: -------------------------------------------------------------------------------- 1 | .github-issue-link-container { 2 | padding-right: 0.5rem; 3 | } 4 | .github-issue-link { 5 | font-size: var(--font-size--small); 6 | font-weight: bold; 7 | background-color: #DD4814; 8 | padding: 13px 23px; 9 | text-decoration: none; 10 | } 11 | .github-issue-link:link { 12 | color: #FFFFFF; 13 | } 14 | .github-issue-link:visited { 15 | color: #FFFFFF 16 | } 17 | .muted-link.github-issue-link:hover { 18 | color: #FFFFFF; 19 | text-decoration: underline; 20 | } 21 | .github-issue-link:active { 22 | color: #FFFFFF; 23 | text-decoration: underline; 24 | } 25 | -------------------------------------------------------------------------------- /docs/.sphinx/_static/github_issue_links.js: -------------------------------------------------------------------------------- 1 | // if we already have an onload function, save that one 2 | var prev_handler = window.onload; 3 | 4 | window.onload = function() { 5 | // call the previous onload function 6 | if (prev_handler) { 7 | prev_handler(); 8 | } 9 | 10 | const link = document.createElement("a"); 11 | link.classList.add("muted-link"); 12 | link.classList.add("github-issue-link"); 13 | link.text = "Give feedback"; 14 | link.href = ( 15 | github_url 16 | + "/issues/new?" 
17 | + "title=docs%3A+TYPE+YOUR+QUESTION+HERE" 18 | + "&body=*Please describe the question or issue you're facing with " 19 | + `"${document.title}"` 20 | + ".*" 21 | + "%0A%0A%0A%0A%0A" 22 | + "---" 23 | + "%0A" 24 | + `*Reported+from%3A+${location.href}*` 25 | ); 26 | link.target = "_blank"; 27 | 28 | const div = document.createElement("div"); 29 | div.classList.add("github-issue-link-container"); 30 | div.append(link) 31 | 32 | const container = document.querySelector(".article-container > .content-icon-container"); 33 | container.prepend(div); 34 | }; 35 | -------------------------------------------------------------------------------- /docs/.sphinx/_static/header-nav.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function() { 2 | $(document).on("click", function () { 3 | $(".more-links-dropdown").hide(); 4 | }); 5 | 6 | $('.nav-more-links').click(function(event) { 7 | $('.more-links-dropdown').toggle(); 8 | event.stopPropagation(); 9 | }); 10 | }) 11 | -------------------------------------------------------------------------------- /docs/.sphinx/_static/tag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/docs/.sphinx/_static/tag.png -------------------------------------------------------------------------------- /docs/.sphinx/_templates/base.html: -------------------------------------------------------------------------------- 1 | {% extends "furo/base.html" %} 2 | 3 | {% block theme_scripts %} 4 | 7 | {% endblock theme_scripts %} 8 | 9 | {# ru-fu: don't include the color variables from the conf.py file, but use a 10 |  separate CSS file to save space #} 11 | {% block theme_styles %} 12 | {% endblock theme_styles %} 13 | -------------------------------------------------------------------------------- /docs/.sphinx/_templates/header.html: 
-------------------------------------------------------------------------------- 1 | 37 | -------------------------------------------------------------------------------- /docs/.sphinx/_templates/page.html: -------------------------------------------------------------------------------- 1 | {% extends "furo/page.html" %} 2 | 3 | {% block footer %} 4 | {% include "footer.html" %} 5 | {% endblock footer %} 6 | 7 | {% block body -%} 8 | {% include "header.html" %} 9 | {{ super() }} 10 | {%- endblock body %} 11 | 12 | {% if meta and ((meta.discourse and discourse_prefix) or meta.relatedlinks) %} 13 | {% set furo_hide_toc_orig = furo_hide_toc %} 14 | {% set furo_hide_toc=false %} 15 | {% endif %} 16 | 17 | {% block right_sidebar %} 18 |
19 | {% if not furo_hide_toc_orig %} 20 |
21 | 22 | {{ _("Contents") }} 23 | 24 |
25 |
26 |
27 | {{ toc }} 28 |
29 |
30 | {% endif %} 31 | {% if meta and ((meta.discourse and discourse_prefix) or meta.relatedlinks) %} 32 | 37 | 47 | {% endif %} 48 |
49 | {% endblock right_sidebar %} 50 | -------------------------------------------------------------------------------- /docs/.sphinx/requirements.txt: -------------------------------------------------------------------------------- 1 | furo 2 | linkify-it-py 3 | lxd-sphinx-extensions 4 | myst-parser 5 | pyspelling 6 | sphinx 7 | sphinx-autobuild 8 | sphinx-copybutton 9 | sphinx-design 10 | sphinx-notfound-page 11 | sphinx-reredirects 12 | sphinx-tabs 13 | sphinxcontrib-jquery 14 | sphinxext-opengraph 15 | sphinx-version-warning 16 | -------------------------------------------------------------------------------- /docs/.sphinx/spellingcheck.yaml: -------------------------------------------------------------------------------- 1 | matrix: 2 | - name: rST files 3 | aspell: 4 | lang: en 5 | d: en_GB 6 | dictionary: 7 | wordlists: 8 | - .wordlist.txt 9 | - .custom_wordlist.txt 10 | output: .sphinx/.wordlist.dic 11 | sources: 12 | - _build/**/*.html|!_build/reference/release-notes/** 13 | pipeline: 14 | - pyspelling.filters.html: 15 | comments: false 16 | attributes: 17 | - title 18 | - alt 19 | ignores: 20 | - code 21 | - pre 22 | - spellexception 23 | - link 24 | - title 25 | - strong.command 26 | - div.relatedlinks 27 | - div.visually-hidden 28 | - img 29 | - a.p-navigation__link 30 | -------------------------------------------------------------------------------- /docs/.wokeignore: -------------------------------------------------------------------------------- 1 | # the cheat sheets contain a link to a repository with a block word which we 2 | # cannot avoid for now, ie 3 | # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html 4 | doc-cheat-sheet* 5 | -------------------------------------------------------------------------------- /docs/.wordlist.txt: -------------------------------------------------------------------------------- 1 | addons 2 | api 3 | API 4 | APIs 5 | Auth 6 | auth 7 | authorization 8 | auto-generated 9 | automations 10 | 
backticks 11 | balancer 12 | capitalize 13 | capitalized 14 | catalog 15 | centralized 16 | Centralized 17 | CephFS 18 | Cephx 19 | Charmhub 20 | CLA 21 | CLI 22 | color 23 | containerized 24 | cryptographic 25 | Cryptographic 26 | CVEs 27 | DB 28 | dev 29 | Diátaxis 30 | docsacademy 31 | dqlite 32 | dropdown 33 | EBS 34 | EKS 35 | favicon 36 | firewalld 37 | firewalled 38 | firewalling 39 | Fosstodon 40 | Furo 41 | Gb 42 | GiB 43 | Grafana 44 | hearted 45 | HTTPS 46 | IAM 47 | installable 48 | Intra 49 | iptables 50 | ise 51 | ize 52 | Jira 53 | journald 54 | JSON 55 | Juju 56 | JWTs 57 | kern 58 | keysize 59 | Kubeflow 60 | Kubernetes 61 | Makefile 62 | manpage 63 | mentorship 64 | MicroCeph 65 | microceph 66 | microcephd 67 | microcephd's 68 | Microcluster 69 | monospaced 70 | mortem 71 | MyST 72 | namespace 73 | namespaces 74 | nftables 75 | NodePort 76 | nr 77 | observability 78 | OLM 79 | OpenSSL 80 | Permalink 81 | PKI 82 | POSIX 83 | pre 84 | prometheus 85 | radosgw 86 | ReadMe 87 | reST 88 | RESTful 89 | reStructuredText 90 | rsyslog 91 | RTD 92 | sandboxed 93 | sandboxing 94 | Sandboxing 95 | sdb 96 | ssl 97 | SSL 98 | subdirectories 99 | subfolders 100 | subnets 101 | subtree 102 | subtrees 103 | syslog 104 | systemd 105 | TCP 106 | TLS 107 | ufw 108 | UI 109 | unauthorized 110 | untrusted 111 | url 112 | USNs 113 | Utilize 114 | utilizing 115 | VLANs 116 | VM 117 | WAL 118 | YAML 119 | -------------------------------------------------------------------------------- /docs/explanation/assets/flow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/docs/explanation/assets/flow.jpg -------------------------------------------------------------------------------- /docs/explanation/cluster-configurations.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | 
Cluster network configurations 3 | ============================== 4 | 5 | Overview 6 | -------- 7 | 8 | Network configuration is critical for building a high performance Ceph Storage Cluster. 9 | 10 | Ceph clients make requests directly to Ceph OSD Daemons i.e. Ceph does not perform request routing. The OSD Daemons perform data replication on behalf of clients, which means replication and other factors impose additional loads on Ceph Storage Cluster networks. Therefore, to enhance security and stability, it can be advantageous to split public and cluster network traffic so that client traffic flows on a public net while cluster traffic (for replication and backfilling) utilises a separate net. This helps to prevent malicious or malfunctioning clients from disrupting cluster backend operations. 11 | 12 | For more details, refer to `Ceph Network Config `_. 13 | 14 | Implementation 15 | -------------- 16 | MicroCeph cluster config subcommands rely on ``ceph config`` as the single source of truth for config values and for getting/setting the configs. After updating (setting/resetting) a config value, a restart request is sent to other hosts on the MicroCeph cluster for restarting particular daemons. This is done for the change to take effect. 17 | 18 | In a multi-node MicroCeph cluster, restarting the daemons is done cautiously in a synchronous manner to prevent cluster outage. The flow diagram below explains the order of execution. 19 | 20 | .. figure:: assets/flow.jpg 21 | 22 | Execution flow of config set/reset commands in multi-node MicroCeph deployment 23 | -------------------------------------------------------------------------------- /docs/explanation/cluster-maintenance.rst: -------------------------------------------------------------------------------- 1 | ================ 2 | Maintenance Mode 3 | ================ 4 | 5 | Overview 6 | -------- 7 | 8 | Cluster maintenance is important for keeping the Ceph Storage Cluster at a healthy state. 
9 | 10 | MicroCeph provides a simple and consistent workflow to support maintenance activity. Before 11 | executing any high-risk maintenance operations on a node, operators are strongly recommended to 12 | enable maintenance mode to minimise the impact and ensure system stability. For more information on how 13 | to enable maintenance mode in MicroCeph, please refer to :doc:`Perform cluster 14 | maintenance`. 15 | 16 | Strategy 17 | -------- 18 | 19 | Bringing a node into and out of maintenance mode generally follows check-and-apply pattern. We 20 | first verify if the node is ready for maintenance operations, then run the steps to bring the node 21 | into or out of maintenance mode if the verification passes. The strategy is idempotent, you can 22 | repeatedly run the steps without any issue. 23 | 24 | The strategy is defined as follows: 25 | 26 | Enabling maintenance mode 27 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 28 | 29 | - Check if OSDs on the node are ``ok-to-stop`` to ensure sufficient redundancy to tolerate the loss 30 | of OSDs on the node. 31 | - Check if the number of running services is greater than the minimum (3 MON, 1 MDS, 1 MGR) 32 | required for quorum. 33 | - *(Optional)* Apply noout flag to prevent data migration from triggering during the planned 34 | maintenance slot. (default=True) 35 | - *(Optional)* Bring the OSDs down and disable the service (Default=False) 36 | 37 | Disabling maintenance mode 38 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 39 | 40 | - Remove noout flag to allow data migration from triggering after the planned maintenance slot. 41 | - Bring the OSDs up and enable the service 42 | 43 | 44 | -------------------------------------------------------------------------------- /docs/explanation/index.rst: -------------------------------------------------------------------------------- 1 | Explanation 2 | =========== 3 | 4 | The explanatory and conceptual guides in this section provide a better understanding of MicroCeph. 
5 | They enable you to expand your knowledge and become better at configuring, encrypting, managing, deploying and backing up your workloads. 6 | 7 | Working with MicroCeph 8 | ----------------------- 9 | 10 | Understand the steps to take to successfully deploy and manage your Ceph clusters quickly. 11 | 12 | .. toctree:: 13 | :maxdepth: 1 14 | 15 | cluster-configurations 16 | cluster-maintenance 17 | cluster-scaling 18 | taking-snapshots 19 | 20 | The Snap content interface 21 | -------------------------- 22 | 23 | Access MicroCeph's configuration and credentials. 24 | 25 | .. toctree:: 26 | :maxdepth: 1 27 | 28 | snap-content-interface 29 | 30 | The MicroCeph charm 31 | -------------------- 32 | 33 | The MicroCeph charm helps you deploy and manage your MicroCeph deployment with `Juju`_. 34 | 35 | .. LINKS 36 | .. _Juju: https://juju.is/ 37 | 38 | .. toctree:: 39 | :maxdepth: 1 40 | 41 | microceph-charm 42 | 43 | Security in MicroCeph 44 | --------------------- 45 | 46 | Learn about security approaches in MicroCeph, e.g. enabling support for MicroCeph's automatic full disk encryption on OSDs and 47 | cryptographic technology used in MicroCeph. 48 | 49 | .. toctree:: 50 | :maxdepth: 2 51 | 52 | Security in MicroCeph 53 | 54 | If you have a specific goal, but are already familiar with MicroCeph, our :doc:`how-to guides <../how-to/index>` have more in-depth detail and instructions. 55 | 56 | Take a look at our :doc:`reference <../reference/index>` section when you need to know which MicroCeph commands to use. 57 | -------------------------------------------------------------------------------- /docs/explanation/microceph-charm.rst: -------------------------------------------------------------------------------- 1 | The MicroCeph charm 2 | =================== 3 | 4 | The MicroCeph charm is used to incorporate MicroCeph into Juju-managed 5 | deployments. It offers an alternative method for deploying and managing 6 | MicroCeph. 
In effect, the charm installs the ``microceph`` snap. As expected, 7 | it provides MicroCeph management via standard Juju commands (e.g. ``juju 8 | config`` and ``juju run``). 9 | 10 | For more information, see the `microceph`_ entry on the Charmhub. 11 | 12 | .. LINKS 13 | .. _microceph: https://charmhub.io/microceph 14 | -------------------------------------------------------------------------------- /docs/explanation/snap-content-interface.rst: -------------------------------------------------------------------------------- 1 | ==================================== 2 | Snap content interface for MicroCeph 3 | ==================================== 4 | 5 | Overview 6 | -------- 7 | 8 | Snap content interfaces enable access to a particular directory from a producer snap. The MicroCeph ``ceph-conf`` content interface is designed to facilitate access to MicroCeph's configuration and credentials. This interface includes information about MON addresses, enabling a consumer snap to connect to the MicroCeph cluster using this data. 9 | 10 | Additionally, the ``ceph-conf`` content interface also provides version information of the running Ceph software. 11 | 12 | Usage 13 | ----- 14 | 15 | The usage of the ``ceph-conf`` interface revolves around providing the consuming snap access to necessary configuration details. 16 | 17 | Here is how it can be utilised: 18 | 19 | - Connect to the ``ceph-conf`` content interface to gain access to MicroCeph's configuration and credentials. 20 | - The interface exposes a standard ``ceph.conf`` configuration file as well Ceph keyrings with administrative privileges. 21 | - Use the MON addresses included in the configuration to connect to the MicroCeph cluster. 22 | - The interface provides version information that can be used to set up version-specific clients. 
23 | 24 | To connect the ``ceph-conf`` content interface to a consumer snap, use the following command: 25 | 26 | :: 27 | 28 | snap connect :ceph-conf microceph:ceph-conf 29 | 30 | 31 | Replace ```` with the name of your consumer snap. Once executed, this command establishes a connection between the consumer snap and the MicroCeph ``ceph-conf`` interface. 32 | 33 | 34 | -------------------------------------------------------------------------------- /docs/explanation/taking-snapshots.rst: -------------------------------------------------------------------------------- 1 | ================================ 2 | Taking Backups for your Workload 3 | ================================ 4 | 5 | The MicroCeph deployed Ceph cluster supports snapshot based backups 6 | for Block and File based workloads. 7 | 8 | This document is an index of upstream documentation available for snapshots 9 | along with some bridging commentary to help understand it better. 10 | 11 | RBD Snapshots: 12 | -------------- 13 | 14 | Ceph supports creating point in time read-only logical copies. This allows 15 | an operator to create a checkpoint for their workload backup. The snapshots 16 | can be exported for external backup or kept in Ceph for rollback to older version. 17 | 18 | Pre-requisites 19 | ++++++++++++++ 20 | 21 | Refer to :doc:`How to mount MicroCeph Block Devices <../how-to/mount-block-device>` 22 | for getting started with RBD. 23 | 24 | Once you have the block device mounted and in use, you can jump to 25 | `Ceph RBD Snapshots`_ 26 | 27 | CephFs Snapshots: 28 | ----------------- 29 | 30 | Similar to RBD snapshots, CephFs snapshots are read-only logical copies of **any chosen sub-directory** 31 | of the corresponding filesystem. 32 | 33 | Pre-requisites 34 | ++++++++++++++ 35 | 36 | Refer to :doc:`How to mount MicroCeph CephFs shares <../how-to/mount-cephfs-share>` 37 | for getting started with CephFs. 
38 | 39 | Once you have the filesystem mounted and in use, you can jump to 40 | `CephFs Snapshots`_ 41 | 42 | .. LINKS 43 | 44 | .. _Ceph RBD Snapshots: https://docs.ceph.com/en/latest/rbd/rbd-snapshot/ 45 | .. _CephFs Snapshots: https://docs.ceph.com/en/latest/dev/cephfs-snapshots/ -------------------------------------------------------------------------------- /docs/how-to/assets/alerts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/docs/how-to/assets/alerts -------------------------------------------------------------------------------- /docs/how-to/assets/prometheus_console.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/docs/how-to/assets/prometheus_console.jpg -------------------------------------------------------------------------------- /docs/how-to/assets/prometheus_microceph_scraping.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/canonical/microceph/cc6890d6a04cbb1f0dfc5ea0a4b776f764ed357d/docs/how-to/assets/prometheus_microceph_scraping.jpg -------------------------------------------------------------------------------- /docs/how-to/change-log-level.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | Changing the log level 3 | ====================== 4 | 5 | By default, the MicroCeph daemon runs with the log level set to DEBUG. While that is the desirable 6 | behaviour for a good number of use cases, there are instances when this level is far too high - 7 | for example, embedded devices where storage is much more limited. For these reasons, the MicroCeph 8 | daemon exposes a way to both get and set the log level. 
9 | 10 | Configuring the log level 11 | ------------------------- 12 | 13 | MicroCeph includes the command ``log``, with the sub-commands ``set-level`` and ``get-level``. When setting, we support both string and integer formats for the log level. For example: 14 | 15 | .. code-block:: none 16 | 17 | sudo microceph log set-level warning 18 | sudo microceph log set-level 3 19 | 20 | Both commands are equivalent. The mapping from integer to string can be consulted by querying the 21 | help for the ``set-level`` sub-command. Note that any changes made to the log level take effect 22 | immediately, and need no restarts. 23 | 24 | On the other hand, the ``get-level`` sub-command takes no arguments and returns an integer level only. 25 | Any value returned by ``get-level`` can be used for ``set-level``. 26 | 27 | For example, after setting the level as shown in the example, we can verify in this way: 28 | 29 | .. code-block:: none 30 | 31 | sudo microceph log get-level 32 | 3 33 | 34 | 35 | -------------------------------------------------------------------------------- /docs/how-to/configure-network-keys.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Configuring Cluster network 3 | ============================ 4 | 5 | If you configure a cluster network, OSDs will route heartbeat, object replication and recovery traffic over the cluster network. This may improve performance compared to using a single network. 6 | 7 | The MicroCeph cluster configuration CLI supports setting, getting, resetting and listing supported config keys mentioned below. 8 | 9 | .. list-table:: Supported Config Keys 10 | :widths: 30 70 11 | :header-rows: 1 12 | 13 | * - Key 14 | - Description 15 | * - cluster_network 16 | - Set this key to desired CIDR to configure cluster network 17 | 18 | 1. Supported config keys can be configured using the 'set' command: 19 | 20 | .. 
code-block:: shell 21 | 22 | $ sudo microceph cluster config set cluster_network 10.5.0.0/16 23 | 24 | 2. Config value for a particular key could be queried using the 'get' command: 25 | 26 | .. code-block:: shell 27 | 28 | $ sudo microceph cluster config get cluster_network 29 | +---+-----------------+-------------+ 30 | | # | KEY | VALUE | 31 | +---+-----------------+-------------+ 32 | | 0 | cluster_network | 10.5.0.0/16 | 33 | +---+-----------------+-------------+ 34 | 35 | 3. A list of all the configured keys can be fetched using the 'list' command: 36 | 37 | .. code-block:: shell 38 | 39 | $ sudo microceph cluster config list 40 | +---+-----------------+-------------+ 41 | | # | KEY | VALUE | 42 | +---+-----------------+-------------+ 43 | | 0 | cluster_network | 10.5.0.0/16 | 44 | +---+-----------------+-------------+ 45 | 46 | 4. Resetting a config key (i.e. setting the key to its default value) can be performed using the 'reset' command: 47 | 48 | .. code-block:: shell 49 | 50 | $ sudo microceph cluster config reset cluster_network 51 | $ sudo microceph cluster config list 52 | +---+-----+-------+ 53 | | # | KEY | VALUE | 54 | +---+-----+-------+ 55 | 56 | For more explanations and implementation details refer to :doc:`explanation <../explanation/cluster-configurations>` 57 | 58 | -------------------------------------------------------------------------------- /docs/how-to/index.rst: -------------------------------------------------------------------------------- 1 | How-to guides 2 | ============= 3 | 4 | Our *how-to* guides give directions on how to perform key operations and processes in MicroCeph. 5 | 6 | Installing and initialising MicroCeph cluster 7 | --------------------------------------------- 8 | 9 | The guides in this section are helpful in the installation and initialisation 10 | of both single-node and multi-node clusters. 11 | 12 | .. 
toctree:: 13 | :maxdepth: 1 14 | 15 | single-node 16 | multi-node 17 | 18 | Configuring your cluster 19 | ------------------------ 20 | 21 | See these guides for client and network configurations, authentication service integration, and 22 | configuration of metrics, alerts and other service instances. 23 | 24 | .. toctree:: 25 | :maxdepth: 1 26 | 27 | rbd-client-cfg 28 | integrate-keystone 29 | configure-network-keys 30 | enable-metrics 31 | enable-alerts 32 | enable-service-instances 33 | 34 | Interacting with your cluster 35 | ----------------------------- 36 | 37 | Manage your cluster: find steps on how to configure the log level, remove disks, 38 | migrate services and more. 39 | 40 | .. toctree:: 41 | :maxdepth: 1 42 | 43 | change-log-level 44 | migrate-auto-services 45 | remove-disk 46 | perform-cluster-maintenance 47 | 48 | Managing a remote cluster 49 | ------------------------- 50 | 51 | Make MicroCeph aware of a remote cluster and configure replication for 52 | RBD pools and images. 53 | 54 | .. toctree:: 55 | :maxdepth: 1 56 | 57 | import-remote-cluster 58 | configure-rbd-mirroring 59 | perform-site-failover 60 | 61 | Upgrading your cluster 62 | ---------------------- 63 | 64 | Follow these steps carefully to perform a major upgrade. 65 | 66 | .. toctree:: 67 | :maxdepth: 1 68 | 69 | major-upgrade 70 | 71 | 72 | Consuming cluster storage 73 | ------------------------- 74 | 75 | Follow these guides to learn how to make use of the storage provided by your cluster. 76 | 77 | .. 
toctree:: 78 | :maxdepth: 1 79 | 80 | mount-block-device 81 | mount-cephfs-share 82 | 83 | -------------------------------------------------------------------------------- /docs/how-to/major-upgrade.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Major Upgrades 3 | =============== 4 | 5 | 6 | Overview 7 | -------- 8 | 9 | This guide provides step-by-step instructions on how to upgrade your MicroCeph cluster to a new major release. 10 | 11 | Follow these steps carefully to ensure a smooth transition. 12 | 13 | In the code examples below an upgrade to the Squid stable 14 | release is shown. The procedure should apply to any major release 15 | upgrade in a similar way however. 16 | 17 | 18 | 19 | Procedure 20 | --------- 21 | 22 | 23 | Prerequisites 24 | ~~~~~~~~~~~~~ 25 | 26 | Firstly, before initiating the upgrade, ensure that the cluster is healthy. Use the below command to check the cluster health: 27 | 28 | .. code-block:: none 29 | 30 | sudo ceph -s 31 | 32 | **Note**: Do not start the upgrade if the cluster is unhealthy. 33 | 34 | 35 | Secondly, review the :doc:`release notes ` to check for any version-specific information. 36 | 37 | 38 | 39 | Optional but Recommended: Preparation Steps 40 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 41 | 42 | Carry out these precautionary steps before initiating the upgrade: 43 | 44 | 1. **Back up your data**: as a general precaution, it is recommended to take a backup of your data (such as stored S3 objects, RBD volumes, or cephfs filesystems). 45 | 46 | 2. **Prevent OSDs from dropping out of the cluster**: Run the following command to avoid OSDs from unintentionally dropping out of the cluster during the upgrade process: 47 | 48 | .. 
code-block:: none 49 | 50 | sudo ceph osd set noout 51 | 52 | 53 | Upgrading Each Cluster Node 54 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 55 | 56 | If your cluster is healthy, proceed with the upgrade by refreshing the snap on each node using the following command: 57 | 58 | .. code-block:: none 59 | 60 | sudo snap refresh microceph --channel squid/stable 61 | 62 | Be sure to perform the refresh on every node in the cluster. 63 | 64 | Verifying the Upgrade 65 | ~~~~~~~~~~~~~~~~~~~~~ 66 | 67 | Once the upgrade process is done, verify that all components have been upgraded correctly. Use the following command to check: 68 | 69 | .. code-block:: none 70 | 71 | sudo ceph versions 72 | 73 | 74 | Unsetting Noout 75 | ~~~~~~~~~~~~~~~ 76 | 77 | If you had previously set noout, unset it with this command: 78 | 79 | .. code-block:: none 80 | 81 | sudo ceph osd unset noout 82 | 83 | 84 | You have now successfully upgraded your Ceph cluster. 85 | 86 | 87 | -------------------------------------------------------------------------------- /docs/how-to/migrate-auto-services.rst: -------------------------------------------------------------------------------- 1 | ============================================ 2 | Migrating automatically-provisioned services 3 | ============================================ 4 | 5 | MicroCeph deploys automatically-provisioned Ceph services when needed. These 6 | services include: 7 | 8 | * MON - `Monitor service`_ 9 | * MDS - `Metadata service`_ 10 | * MGR - `Manager service`_ 11 | 12 | It can however be useful to have the ability to move (or migrate) these 13 | services from one node to another. This may be desirable during a maintenance 14 | window for instance where these services must remain available. 15 | 16 | This is the purpose of the :command:`cluster migrate` command. It enables 17 | automatically-provisioned services on a target node and disables them on the 18 | source node. 19 | 20 | The syntax is: 21 | 22 | .. 
code-block:: none 23 | 24 | sudo microceph cluster migrate 25 | 26 | Where the source and destination are node names that are available via the 27 | :command:`status` command: 28 | 29 | .. code-block:: none 30 | 31 | sudo microceph status 32 | 33 | Post-migration, the :command:`status` command can also be used to verify the 34 | distribution of services among nodes. 35 | 36 | **Notes:** 37 | 38 | * It's not possible, nor useful, to have more than one instance of an 39 | automatically-provisioned service on any given node. 40 | 41 | * RADOS Gateway services are not considered to be of the 42 | automatically-provisioned type; they are enabled and disabled explicitly on a 43 | node. 44 | 45 | .. LINKS 46 | 47 | .. _Manager service: https://docs.ceph.com/en/latest/mgr/ 48 | .. _Monitor service: https://docs.ceph.com/en/latest/man/8/ceph-mon/ 49 | .. _Metadata service: https://docs.ceph.com/en/latest/man/8/ceph-mds/ 50 | -------------------------------------------------------------------------------- /docs/how-to/perform-cluster-maintenance.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | Perform cluster maintenance 3 | =========================== 4 | 5 | Overview 6 | -------- 7 | 8 | MicroCeph provides a simple and consistent workflow to support maintenance activity. 9 | 10 | Before proceeding, please refer to the :doc:`Cluster maintenance` 11 | to understand its functionality and impact. 12 | 13 | Enabling Cluster Maintenance 14 | ---------------------------- 15 | 16 | To review the action plan for enabling maintenance mode, run 17 | 18 | .. code:: text 19 | 20 | microceph cluster maintenance enter --dry-run 21 | 22 | If you only want to verify if the node is ready for maintenance operations, run 23 | 24 | .. code:: text 25 | 26 | microceph cluster maintenance enter --check-only 27 | 28 | By default, noout is set when entering maintenance mode. 
To disable noout to enable data migration 29 | during maintenance, run 30 | 31 | .. code:: text 32 | 33 | microceph cluster maintenance enter --set-noout=False 34 | 35 | By default, OSDs on the node are not stopped during maintenance mode. To stop the OSD service on 36 | the node during maintenance, run 37 | 38 | .. code:: text 39 | 40 | microceph cluster maintenance enter --stop-osds 41 | 42 | You can also forcibly bring a node into maintenance mode or ignore the safety checks if you know 43 | what you are doing, but it's generally not recommended as it's not guaranteed the node is ready for 44 | maintenance operations. 45 | 46 | .. code:: text 47 | 48 | # Forcibly enter maintenance mode 49 | microceph cluster maintenance enter --force 50 | 51 | # Ignore safety checks when entering maintenance mode 52 | microceph cluster maintenance enter --ignore-check 53 | 54 | 55 | Disabling Cluster Maintenance 56 | ----------------------------- 57 | 58 | To review the action plan for disabling maintenance mode, run 59 | 60 | .. code:: text 61 | 62 | microceph cluster maintenance exit --dry-run 63 | 64 | To bring a node out of maintenance, run 65 | 66 | .. code:: text 67 | 68 | microceph cluster maintenance exit 69 | 70 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/reference/commands/.cmd-template: -------------------------------------------------------------------------------- 1 | ========= 2 | ``{cmd}`` 3 | ========= 4 | 5 | {cmd_help} 6 | 7 | Usage: 8 | 9 | .. code-block:: none 10 | 11 | microceph {cmd} [flags] 12 | microceph {cmd} [command] 13 | 14 | Available commands: 15 | 16 | .. code-block:: none 17 | 18 | {subcmd} {subcmd_help} 19 | 20 | 21 | Global options: 22 | 23 | .. code-block:: none 24 | 25 | -d, --debug Show all debug messages 26 | -h, --help Print help 27 | --state-dir Path to store state information 28 | -v, --verbose Show all information messages 29 | --version Print version number 30 | 31 | 32 | ``subcmd`` 33 | ---------- 34 | 35 | {subcmd_help} 36 | 37 | 38 | Usage: 39 | 40 | .. code-block:: none 41 | 42 | microceph {cmd} {subcmd} {params} 43 | 44 | 45 | Flags: 46 | 47 | .. code-block:: none 48 | 49 | {flag} {flag_help} 50 | -------------------------------------------------------------------------------- /docs/reference/commands/disable.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | ``disable`` 3 | =========== 4 | 5 | Disables a feature on the cluster 6 | 7 | Usage: 8 | 9 | .. code-block:: none 10 | 11 | microceph disable [flags] 12 | microceph disable [command] 13 | 14 | Available Commands: 15 | 16 | .. code-block:: none 17 | 18 | rgw Disable the RGW service on this node 19 | 20 | Global flags: 21 | 22 | .. 
code-block:: none 23 | 24 | -d, --debug Show all debug messages 25 | -h, --help Print help 26 | --state-dir Path to store state information 27 | -v, --verbose Show all information messages 28 | --version Print version number 29 | 30 | -------------------------------------------------------------------------------- /docs/reference/commands/help.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | ``help`` 3 | ======== 4 | 5 | Help provides help for any command in the application. 6 | Simply type microceph help [path to command] for full details. 7 | 8 | Usage: 9 | 10 | .. code-block:: none 11 | 12 | microceph help [command] [flags] 13 | 14 | Global flags: 15 | 16 | .. code-block:: none 17 | 18 | -d, --debug Show all debug messages 19 | -h, --help Print help 20 | --state-dir Path to store state information 21 | -v, --verbose Show all information messages 22 | --version Print version number 23 | -------------------------------------------------------------------------------- /docs/reference/commands/index.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | MicroCeph CLI Commands 3 | ====================== 4 | 5 | Use these commands to initialise, deploy and manage your MicroCeph cluster. 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | :glob: 10 | 11 | * 12 | 13 | -------------------------------------------------------------------------------- /docs/reference/commands/init.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | ``init`` 3 | ======== 4 | 5 | Initialises MicroCeph (in interactive mode). 6 | 7 | Usage: 8 | 9 | .. code-block:: none 10 | 11 | microceph init [flags] 12 | 13 | Global flags: 14 | 15 | .. 
code-block:: none 16 | 17 | -d, --debug Show all debug messages 18 | -h, --help Print help 19 | --state-dir Path to store state information 20 | -v, --verbose Show all information messages 21 | --version Print version number 22 | -------------------------------------------------------------------------------- /docs/reference/commands/pool.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | ``pool`` 3 | ======== 4 | 5 | Manages pools in MicroCeph. 6 | 7 | Usage: 8 | 9 | .. code-block:: none 10 | 11 | microceph pool [command] 12 | 13 | Available commands: 14 | 15 | .. code-block:: none 16 | 17 | set-rf Set the replication factor for pools 18 | 19 | Global flags: 20 | 21 | .. code-block:: none 22 | 23 | -d, --debug Show all debug messages 24 | -h, --help Print help 25 | --state-dir Path to store state information 26 | -v, --verbose Show all information messages 27 | --version Print version number 28 | 29 | 30 | ``set-rf`` 31 | ---------- 32 | 33 | Sets the replication factor for one or more pools in the cluster. 34 | The command takes two arguments: The pool specification (a string) and the 35 | replication factor (an integer). 36 | 37 | The pool specification can take one of three forms: Either a list of pools, 38 | separated by a space, in which case the replication factor is applied only to 39 | those pools (provided they exist). It can also be an asterisk ('*') in which 40 | case the process is applied to all existing pools; or an empty string (''), 41 | which sets the default pool size, but doesn't change any existing pools. 42 | 43 | Usage: 44 | 45 | .. code-block:: none 46 | 47 | microceph pool set-rf 48 | -------------------------------------------------------------------------------- /docs/reference/commands/remote.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | ``remote`` 3 | =========== 4 | 5 | Manage MicroCeph remotes. 6 | 7 | Usage: 8 | 9 | .. 
code-block:: none 10 | 11 | microceph remote [flags] 12 | microceph remote [command] 13 | 14 | Available commands: 15 | 16 | .. code-block:: none 17 | 18 | import Import external MicroCeph cluster as a remote 19 | list List all configured remotes for the site 20 | remove Remove configured remote 21 | 22 | Global options: 23 | 24 | .. code-block:: none 25 | 26 | -d, --debug Show all debug messages 27 | -h, --help Print help 28 | --state-dir Path to store state information 29 | -v, --verbose Show all information messages 30 | --version Print version number 31 | 32 | ``import`` 33 | ---------- 34 | 35 | Import external MicroCeph cluster as a remote 36 | 37 | Usage: 38 | 39 | .. code-block:: none 40 | 41 | microceph remote import [flags] 42 | 43 | Flags: 44 | 45 | .. code-block:: none 46 | 47 | --local-name string friendly local name for cluster 48 | 49 | ``list`` 50 | --------- 51 | 52 | List all configured remotes for the site 53 | 54 | Usage: 55 | 56 | .. code-block:: none 57 | 58 | microceph remote list [flags] 59 | 60 | Flags: 61 | 62 | .. code-block:: none 63 | 64 | --json output as json string 65 | 66 | ``remove`` 67 | ---------- 68 | 69 | Remove configured remote 70 | 71 | Usage: 72 | 73 | .. code-block:: none 74 | 75 | microceph remote remove [flags] 76 | 77 | -------------------------------------------------------------------------------- /docs/reference/commands/status.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | ``status`` 3 | ========== 4 | 5 | Reports the status of the cluster. 6 | 7 | Usage: 8 | 9 | .. code-block:: none 10 | 11 | microceph status [flags] 12 | 13 | Global flags: 14 | 15 | .. 
code-block:: none 16 | 17 | -d, --debug Show all debug messages 18 | -h, --help Print help 19 | --state-dir Path to store state information 20 | -v, --verbose Show all information messages 21 | --version Print version number 22 | 23 | -------------------------------------------------------------------------------- /docs/reference/index.rst: -------------------------------------------------------------------------------- 1 | Reference 2 | ========= 3 | 4 | Our Reference section provides technical details about MicroCeph, such 5 | as reference information about the command line interface and notes on 6 | major MicroCeph releases. 7 | 8 | 9 | CLI Commands 10 | ------------ 11 | 12 | MicroCeph has a command line interface that can be used to manage a client and the cluster, as well as query the status of any current deployment. 13 | Each command is documented separately, or use the help argument from the command line to learn more about the commands while working with MicroCeph, 14 | with ``microceph help``. 15 | 16 | .. toctree:: 17 | :maxdepth: 1 18 | 19 | commands/index 20 | 21 | 22 | Release Notes 23 | ------------- 24 | 25 | The release notes section provides details on major MicroCeph releases. 26 | 27 | .. toctree:: 28 | :maxdepth: 1 29 | 30 | release-notes -------------------------------------------------------------------------------- /docs/reuse/links.txt: -------------------------------------------------------------------------------- 1 | .. _reStructuredText style guide: https://canonical-documentation-with-sphinx-and-readthedocscom.readthedocs-hosted.com/style-guide/ 2 | .. _Read the Docs at Canonical: https://library.canonical.com/documentation/read-the-docs 3 | .. _How to publish documentation on Read the Docs: https://library.canonical.com/documentation/publish-on-read-the-docs 4 | .. 
_Example product documentation: https://canonical-example-product-documentation.readthedocs-hosted.com/ 5 | -------------------------------------------------------------------------------- /microceph/Makefile: -------------------------------------------------------------------------------- 1 | pkg_version=$(shell apt-cache policy ceph-common | awk '/Candidate:/{ print $$2 }' ) 2 | git_version=$(shell git describe --always --dirty --abbrev=10) 3 | MC_VERSION=ceph-version: $(pkg_version); microceph-git: $(git_version) 4 | LDFLAGS="-X 'github.com/canonical/microceph/microceph/version.version=$(MC_VERSION)'" 5 | 6 | .PHONY: default 7 | default: build 8 | 9 | # Build targets. 10 | .PHONY: build 11 | build: 12 | go install -v -ldflags $(LDFLAGS) ./cmd/microceph 13 | go install -v -ldflags $(LDFLAGS) ./cmd/microcephd 14 | 15 | # Testing targets. 16 | .PHONY: check 17 | check: check-static check-unit check-system 18 | 19 | .PHONY: check-unit 20 | check-unit: 21 | go test -cover -coverprofile=coverage.out ./... 22 | go tool cover -html=coverage.out -o coverage.html 23 | 24 | .PHONY: check-system 25 | check-system: 26 | true 27 | 28 | .PHONY: check-static 29 | check-static: 30 | ifeq ($(shell command -v golangci-lint 2> /dev/null),) 31 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest 32 | endif 33 | ifeq ($(shell command -v shellcheck 2> /dev/null),) 34 | echo "Please install shellcheck" 35 | exit 1 36 | endif 37 | ifeq ($(shell command -v revive 2> /dev/null),) 38 | go install github.com/mgechev/revive@latest 39 | endif 40 | golangci-lint run --timeout 5m 41 | revive -set_exit_status ./... 42 | 43 | # Update targets. 44 | .PHONY: update-gomod 45 | update-gomod: 46 | go get -u ./... 47 | 48 | # Static pins 49 | go get github.com/canonical/lxd@stable-5.21 # Stay on v2 dqlite and LXD LTS client 50 | 51 | go mod tidy 52 | go get toolchain@none 53 | 54 | # Update lxd-generate generated database helpers. 
55 | .PHONY: update-schema 56 | update-schema: 57 | go generate ./... 58 | gofmt -s -w ./database/ 59 | goimports -w ./database/ 60 | @echo "Code generation completed" 61 | 62 | -------------------------------------------------------------------------------- /microceph/api/cluster.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | "regexp" 8 | 9 | "github.com/canonical/lxd/lxd/response" 10 | "github.com/canonical/lxd/shared/logger" 11 | "github.com/canonical/microceph/microceph/api/types" 12 | "github.com/canonical/microceph/microceph/ceph" 13 | "github.com/canonical/microceph/microceph/constants" 14 | "github.com/canonical/microceph/microceph/interfaces" 15 | "github.com/canonical/microcluster/v2/rest" 16 | "github.com/canonical/microcluster/v2/state" 17 | ) 18 | 19 | var clusterCmd = rest.Endpoint{ 20 | Path: "cluster", 21 | Get: rest.EndpointAction{Handler: cmdClusterGet, ProxyTarget: false}, 22 | } 23 | 24 | // cmdClusterGet returns a json dump of microceph configs suitable for connecting from a remote cluster 25 | // This also creates a new key based on the remote name with admin privs. 26 | func cmdClusterGet(s state.State, r *http.Request) response.Response { 27 | // Fetch request params. 28 | var req types.ClusterExportRequest 29 | err := json.NewDecoder(r.Body).Decode(&req) 30 | if err != nil { 31 | return response.InternalError(err) 32 | } 33 | 34 | // Check that the cluster name is conformant. 
35 | isOk, err := regexp.MatchString(constants.ClusterNameRegex, req.RemoteName) 36 | if err != nil || !isOk { 37 | err := fmt.Errorf("cluster names can only have [a-z] or [0-9] characters: %w", err) 38 | logger.Error(err.Error()) 39 | return response.BadRequest(err) 40 | } 41 | 42 | // fetch the cluster configurations from dqlite 43 | configs, err := ceph.GetConfigDb(r.Context(), interfaces.CephState{State: s}) 44 | if err != nil { 45 | err := fmt.Errorf("failed to get config db: %w", err) 46 | logger.Error(err.Error()) 47 | return response.InternalError(err) 48 | } 49 | 50 | // generate client keys 51 | clientKey, err := ceph.CreateClientKey( 52 | req.RemoteName, 53 | []string{"mon", "allow *"}, 54 | []string{"osd", "allow *"}, 55 | []string{"mds", "allow *"}, 56 | []string{"mgr", "allow *"}, 57 | ) 58 | if err != nil { 59 | return response.InternalError(err) 60 | } 61 | 62 | // replace admin key with remote client key. 63 | delete(configs, constants.AdminKeyringFieldName) 64 | configs[fmt.Sprintf(constants.AdminKeyringTemplate, req.RemoteName)] = clientKey 65 | 66 | data, err := json.Marshal(configs) 67 | if err != nil { 68 | err := fmt.Errorf("failed to marshal response data: %w", err) 69 | logger.Error(err.Error()) 70 | return response.InternalError(err) 71 | } 72 | 73 | return response.SyncResponse(true, data) 74 | } 75 | -------------------------------------------------------------------------------- /microceph/api/microceph_configs.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | 7 | "github.com/canonical/lxd/lxd/response" 8 | "github.com/canonical/lxd/shared/logger" 9 | "github.com/canonical/microceph/microceph/api/types" 10 | "github.com/canonical/microceph/microceph/ceph" 11 | "github.com/canonical/microcluster/v2/rest" 12 | "github.com/canonical/microcluster/v2/state" 13 | ) 14 | 15 | // top level microceph API 16 | var microcephCmd = 
rest.Endpoint{ 17 | Path: "microceph", 18 | } 19 | 20 | // microceph configs API 21 | var microcephConfigsCmd = rest.Endpoint{ 22 | Path: "microceph/configs", 23 | } 24 | 25 | var logLevelCmd = rest.Endpoint{ 26 | Path: "microceph/configs/log-level", 27 | Put: rest.EndpointAction{Handler: logLevelPut, ProxyTarget: true}, 28 | Get: rest.EndpointAction{Handler: logLevelGet, ProxyTarget: true}, 29 | } 30 | 31 | func logLevelPut(s state.State, r *http.Request) response.Response { 32 | var req types.LogLevelPut 33 | 34 | err := json.NewDecoder(r.Body).Decode(&req) 35 | if err != nil { 36 | return response.InternalError(err) 37 | } 38 | 39 | logger.Debugf("cmdLogLevelPut: %v", req) 40 | err = ceph.SetLogLevel(req.Level) 41 | if err != nil { 42 | return response.SmartError(err) 43 | } 44 | 45 | logger.Debugf("cmdLogLevelPut done: %v", req) 46 | return response.EmptySyncResponse 47 | } 48 | 49 | func logLevelGet(s state.State, r *http.Request) response.Response { 50 | return response.SyncResponse(true, ceph.GetLogLevel()) 51 | } 52 | -------------------------------------------------------------------------------- /microceph/api/pool.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | 7 | "github.com/canonical/lxd/shared/logger" 8 | 9 | "github.com/canonical/lxd/lxd/response" 10 | "github.com/canonical/microcluster/v2/rest" 11 | "github.com/canonical/microcluster/v2/state" 12 | 13 | "github.com/canonical/microceph/microceph/api/types" 14 | "github.com/canonical/microceph/microceph/ceph" 15 | ) 16 | 17 | // /1.0/pools-op endpoint. 18 | var poolsOpCmd = rest.Endpoint{ 19 | Path: "pools-op", 20 | Put: rest.EndpointAction{Handler: cmdPoolsPut, ProxyTarget: true}, 21 | } 22 | 23 | // /1.0/pools endpoint. 
24 | var poolsCmd = rest.Endpoint{ 25 | Path: "pools", 26 | Get: rest.EndpointAction{Handler: cmdPoolsGet, ProxyTarget: true}, 27 | } 28 | 29 | func cmdPoolsGet(s state.State, r *http.Request) response.Response { 30 | logger.Debug("cmdPoolGet") 31 | pools, err := ceph.GetOSDPools() 32 | if err != nil { 33 | return response.SmartError(err) 34 | } 35 | 36 | logger.Debug("cmdPoolGet done") 37 | 38 | return response.SyncResponse(true, pools) 39 | } 40 | 41 | func cmdPoolsPut(s state.State, r *http.Request) response.Response { 42 | var req types.PoolPut 43 | 44 | err := json.NewDecoder(r.Body).Decode(&req) 45 | if err != nil { 46 | return response.InternalError(err) 47 | } 48 | 49 | logger.Debugf("cmdPoolPut: %v", req) 50 | err = ceph.SetReplicationFactor(req.Pools, req.Size) 51 | if err != nil { 52 | return response.SmartError(err) 53 | } 54 | 55 | logger.Debugf("cmdPoolPut done: %v", req) 56 | return response.EmptySyncResponse 57 | } 58 | -------------------------------------------------------------------------------- /microceph/api/resources.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/canonical/lxd/lxd/resources" 7 | "github.com/canonical/lxd/lxd/response" 8 | "github.com/canonical/microcluster/v2/rest" 9 | "github.com/canonical/microcluster/v2/state" 10 | ) 11 | 12 | // /1.0/resources endpoint. 
13 | var resourcesCmd = rest.Endpoint{ 14 | Path: "resources", 15 | 16 | Get: rest.EndpointAction{Handler: cmdResourcesGet, ProxyTarget: true}, 17 | } 18 | 19 | func cmdResourcesGet(s state.State, r *http.Request) response.Response { 20 | storage, err := resources.GetStorage() 21 | if err != nil { 22 | return response.InternalError(err) 23 | } 24 | 25 | return response.SyncResponse(true, storage) 26 | } 27 | -------------------------------------------------------------------------------- /microceph/api/servers.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "github.com/canonical/microcluster/v2/rest" 5 | 6 | "github.com/canonical/microceph/microceph/api/types" 7 | ) 8 | 9 | var Servers = map[string]rest.Server{ 10 | "microceph": { 11 | CoreAPI: true, 12 | ServeUnix: true, 13 | Resources: []rest.Resources{ 14 | { 15 | PathPrefix: types.ExtendedPathPrefix, 16 | Endpoints: []rest.Endpoint{ 17 | disksCmd, 18 | disksDelCmd, 19 | resourcesCmd, 20 | servicesCmd, 21 | configsCmd, 22 | restartServiceCmd, 23 | mdsServiceCmd, 24 | mgrServiceCmd, 25 | monServiceCmd, 26 | poolsOpCmd, 27 | rgwServiceCmd, 28 | rbdMirroServiceCmd, 29 | poolsCmd, 30 | clientCmd, 31 | clientConfigsCmd, 32 | clientConfigsKeyCmd, 33 | microcephCmd, 34 | microcephConfigsCmd, 35 | logLevelCmd, 36 | clusterCmd, 37 | remoteCmd, 38 | remoteNameCmd, 39 | opsCmd, 40 | // Remote Replication APIs 41 | opsReplicationCmd, 42 | opsReplicationWorkloadCmd, 43 | opsReplicationResourceCmd, 44 | // Maintenance APIs 45 | opsMaintenanceNodeCmd, 46 | }, 47 | }, 48 | }, 49 | }, 50 | } 51 | -------------------------------------------------------------------------------- /microceph/api/types/client_configs.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // ClientConfig type holds parameters from the `client config` API request 4 | type ClientConfig struct { 5 | Key string `json:"key" 
yaml:"key"` 6 | Value string `json:"value" yaml:"value"` 7 | Host string `json:"host" yaml:"host"` 8 | Wait bool `json:"wait" yaml:"wait"` 9 | } 10 | 11 | // ClientConfigs is a slice of client configs 12 | type ClientConfigs []ClientConfig 13 | -------------------------------------------------------------------------------- /microceph/api/types/configs.go: -------------------------------------------------------------------------------- 1 | // Package types provides shared types and structs. 2 | package types 3 | 4 | // Configs holds the key value pair 5 | type Config struct { 6 | Key string `json:"key" yaml:"key"` 7 | Value string `json:"value" yaml:"value"` 8 | Wait bool `json:"wait" yaml:"wait"` 9 | SkipRestart bool `json:"skip_restart" yaml:"skip_restart"` 10 | } 11 | 12 | // Configs is a slice of configs 13 | type Configs []Config 14 | -------------------------------------------------------------------------------- /microceph/api/types/disks.go: -------------------------------------------------------------------------------- 1 | // Package types provides shared types and structs. 2 | package types 3 | 4 | // DisksPost hold a path and a flag for enabling device wiping 5 | type DisksPost struct { 6 | Path []string `json:"path" yaml:"path"` 7 | Wipe bool `json:"wipe" yaml:"wipe"` 8 | Encrypt bool `json:"encrypt" yaml:"encrypt"` 9 | WALDev *string `json:"waldev" yaml:"waldev"` 10 | WALWipe bool `json:"walwipe" yaml:"walwipe"` 11 | WALEncrypt bool `json:"walencrypt" yaml:"walencrypt"` 12 | DBDev *string `json:"dbdev" yaml:"dbdev"` 13 | DBWipe bool `json:"dbwipe" yaml:"dbwipe"` 14 | DBEncrypt bool `json:"dbencrypt" yaml:"dbencrypt"` 15 | } 16 | 17 | // DiskAddReport holds report for single disk addition i.e. success/failure and optional error for failures. 
18 | type DiskAddReport struct { 19 | Path string `json:"path" yaml:"path"` 20 | Report string `json:"report" yaml:"report"` 21 | Error string `json:"error" yaml:"error"` 22 | } 23 | 24 | // DiskAddResponse holds response data for disk addition. 25 | type DiskAddResponse struct { 26 | ValidationError string `json:"validation_error" yaml:"validation_error"` 27 | Reports []DiskAddReport `json:"report" yaml:"report"` 28 | } 29 | 30 | // DisksDelete holds an OSD number and a flag for forcing the removal 31 | type DisksDelete struct { 32 | OSD int64 `json:"osdid" yaml:"osdid"` 33 | BypassSafety bool `json:"bypass_safety" yaml:"bypass_safety"` 34 | ConfirmDowngrade bool `json:"confirm_downgrade" yaml:"confirm_downgrade"` 35 | ProhibitCrushScaledown bool `json:"prohibit_crush_scaledown" yaml:"prohibit_crush_scaledown"` 36 | Timeout int64 `json:"timeout" yaml:"timeout"` 37 | } 38 | 39 | // Disks is a slice of disks 40 | type Disks []Disk 41 | 42 | // Disk holds data for a device: OSD number, it's path and a location 43 | type Disk struct { 44 | OSD int64 `json:"osd" yaml:"osd"` 45 | Path string `json:"path" yaml:"path"` 46 | Location string `json:"location" yaml:"location"` 47 | } 48 | 49 | type DiskParameter struct { 50 | Path string 51 | Encrypt bool 52 | Wipe bool 53 | LoopSize uint64 54 | } 55 | -------------------------------------------------------------------------------- /microceph/api/types/endpoint_prefix.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "github.com/canonical/microcluster/v2/rest/types" 5 | ) 6 | 7 | const ( 8 | // ExtendedPathPrefix is the path prefix that will be used for the extended endpoints. 
9 | ExtendedPathPrefix types.EndpointPrefix = "1.0" 10 | ) 11 | -------------------------------------------------------------------------------- /microceph/api/types/log.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Types for log management. 4 | type LogLevelPut struct { 5 | Level string `json:"level" yaml:"level"` 6 | } 7 | -------------------------------------------------------------------------------- /microceph/api/types/maintenance.go: -------------------------------------------------------------------------------- 1 | // Package types provides shared types and structs. 2 | package types 3 | 4 | type MaintenanceResult struct { 5 | Name string `json:"name"` 6 | Error string `json:"error"` 7 | Action string `json:"action"` 8 | } 9 | 10 | type MaintenanceResults []MaintenanceResult 11 | 12 | // Options for bringing a node into or out of maintenance 13 | type CommonMaintenanceFlags struct { 14 | DryRun bool `json:"dry_run"` 15 | CheckOnly bool `json:"check_only"` 16 | IgnoreCheck bool `json:"ignore_check"` 17 | } 18 | 19 | // Options for bringing a node into maintenance 20 | type EnterMaintenanceFlags struct { 21 | Force bool `json:"force"` 22 | SetNoout bool `json:"set_noout"` 23 | StopOsds bool `json:"stop_osds"` 24 | } 25 | 26 | // MaintenanceRequest holds data structure for bringing a node into or out of maintenance 27 | type MaintenanceRequest struct { 28 | Status string `json:"status" yaml:"status"` 29 | CommonMaintenanceFlags 30 | EnterMaintenanceFlags 31 | } 32 | -------------------------------------------------------------------------------- /microceph/api/types/pool.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Types for pool management. 4 | type PoolPut struct { 5 | Pools []string `json:"pools" yaml:"pools"` 6 | Size int64 `json:"size" yaml:"size"` 7 | } 8 | 9 | // Pool represents information about an OSD pool. 
10 | type Pool struct { 11 | Pool string `json:"pool" yaml:"pool"` 12 | PoolID int64 `json:"pool_id" yaml:"pool_id"` 13 | Size int64 `json:"size" yaml:"size"` 14 | MinSize int64 `json:"min_size" yaml:"min_size"` 15 | CrushRule string `json:"crush_rule" yaml:"crush_rule"` 16 | } 17 | -------------------------------------------------------------------------------- /microceph/api/types/remote.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // RemoteImportRequest abstracts the data members for the remote import request. 4 | type RemoteImportRequest struct { 5 | Name string `json:"name" yaml:"name"` 6 | LocalName string `json:"local_name" yaml:"local_name"` 7 | Config map[string]string `json:"config" yaml:"config"` 8 | RenderOnly bool `json:"render_only" yaml:"render_only"` 9 | } 10 | 11 | func (r *RemoteImportRequest) Init(localName string, remoteName string, renderOnly bool) *RemoteImportRequest { 12 | r.LocalName = localName 13 | r.Name = remoteName 14 | r.Config = make(map[string]string) 15 | r.RenderOnly = false 16 | return r 17 | } 18 | 19 | // ClusterExportRequest abstracts the data members for cluster export request. 20 | type ClusterExportRequest struct { 21 | RemoteName string `json:"remote_name" yaml:"remote_name"` 22 | } 23 | 24 | // RemoteRecord exposes remote record structure in db to the client package. 25 | type RemoteRecord struct { 26 | // NOTE (utkarshbhatthere): The member names for this data structure 27 | // should match the database record structure. This has been taken out 28 | // since the client package should not import from database package. 
29 | ID int `json:"id" yaml:"id"` 30 | // remote cluster name 31 | Name string `json:"name" yaml:"name"` 32 | // local cluster name 33 | LocalName string `json:"local_name" yaml:"local_name"` 34 | } 35 | 36 | type RemoteRecords []RemoteRecord 37 | -------------------------------------------------------------------------------- /microceph/api/types/replication.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "github.com/canonical/microceph/microceph/constants" 5 | ) 6 | 7 | // ################################## Generic Replication Request ################################## 8 | // ReplicationRequestType defines the various events replication request types. 9 | type ReplicationRequestType string 10 | 11 | // This value is split till '-' to get the API request type and the event name encoded in one string. 12 | const ( 13 | EnableReplicationRequest ReplicationRequestType = "POST-" + constants.EventEnableReplication 14 | ConfigureReplicationRequest ReplicationRequestType = "PUT-" + constants.EventConfigureReplication 15 | PromoteReplicationRequest ReplicationRequestType = "PUT-" + constants.EventPromoteReplication 16 | DemoteReplicationRequest ReplicationRequestType = "PUT-" + constants.EventDemoteReplication 17 | // Delete Requests 18 | DisableReplicationRequest ReplicationRequestType = "DELETE-" + constants.EventDisableReplication 19 | // Get Requests 20 | StatusReplicationRequest ReplicationRequestType = "GET-" + constants.EventStatusReplication 21 | ListReplicationRequest ReplicationRequestType = "GET-" + constants.EventListReplication 22 | // Workload request (has no REST object) 23 | WorkloadReplicationRequest ReplicationRequestType = "" 24 | ) 25 | 26 | type CephWorkloadType string 27 | 28 | const ( 29 | RbdWorkload CephWorkloadType = "rbd" 30 | FsWorkload CephWorkloadType = "cephfs" 31 | RgwWorkload CephWorkloadType = "rgw" 32 | ) 33 | 34 | // ReplicationRequest is interface for all 
Replication implementations (rbd, cephfs, rgw). 35 | // It defines methods used by: 36 | // 1. client code to make the API request 37 | // 2. Replication state machine to feed the correct event trigger. 38 | type ReplicationRequest interface { 39 | GetWorkloadType() CephWorkloadType 40 | GetAPIObjectId() string 41 | GetAPIRequestType() string 42 | GetWorkloadRequestType() string 43 | } 44 | -------------------------------------------------------------------------------- /microceph/api/types/services.go: -------------------------------------------------------------------------------- 1 | // Package types provides shared types and structs. 2 | package types 3 | 4 | // Services holds a slice of services 5 | type Services []Service 6 | 7 | // Service consist of a name and location 8 | type Service struct { 9 | Service string `json:"service" yaml:"service"` 10 | Location string `json:"location" yaml:"location"` 11 | } 12 | 13 | // Name: Name of the service to be enabled 14 | // Wait: Whether the operation is to be performed in sync or async 15 | // Payload: Service specific additional data encoded as a json string. 16 | type EnableService struct { 17 | Name string `json:"name" yaml:"name"` 18 | Wait bool `json:"bool" yaml:"bool"` 19 | Payload string `json:"payload" yaml:"payload"` 20 | // Enable Service passes all additional data as a json payload string. 
21 | } 22 | 23 | // RGWService holds a port number and enable/disable flag 24 | type RGWService struct { 25 | Service 26 | Port int `json:"port" yaml:"port"` 27 | Enabled bool `json:"enabled" yaml:"enabled"` 28 | } 29 | 30 | // MonitorStatus holds the status of all monitors 31 | // for now, this is just the addresses of the monitors 32 | type MonitorStatus struct { 33 | Addresses []string `json:"addresses" yaml:"addresses"` 34 | } 35 | -------------------------------------------------------------------------------- /microceph/ceph/ceph_rbd_mirror.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/canonical/lxd/shared/logger" 8 | ) 9 | 10 | func bootstrapRbdMirror(hostname string, path string) error { 11 | args := []string{ 12 | "auth", 13 | "get-or-create", 14 | fmt.Sprintf("client.rbd-mirror.%s", hostname), 15 | "mon", "profile rbd-mirror", 16 | "osd", "profile rbd", 17 | "-o", filepath.Join(path, "keyring"), 18 | } 19 | 20 | _, err := cephRun(args...) 21 | if err != nil { 22 | logger.Errorf("failed to bootstrap rbd-mirror daemon: %s", err.Error()) 23 | return err 24 | } 25 | 26 | return nil 27 | } 28 | -------------------------------------------------------------------------------- /microceph/ceph/client_config.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "reflect" 7 | 8 | "github.com/canonical/microceph/microceph/interfaces" 9 | 10 | "github.com/canonical/microceph/microceph/common" 11 | "github.com/canonical/microceph/microceph/database" 12 | ) 13 | 14 | // ClientConfigT holds all the client configuration values *applicable* for 15 | // the host machine. These values are consumed by configwriter for ceph.conf 16 | // updation. This approach keeps the client config updation logic tied together 17 | // and easily extendable for more keys. 
18 | type ClientConfigT struct { 19 | IsCache string 20 | CacheSize string 21 | IsCacheWritethrough string 22 | CacheMaxDirty string 23 | CacheTargetDirty string 24 | } 25 | 26 | // GetClientConfigForHost fetches all the applicable client configurations for the provided host. 27 | func GetClientConfigForHost(ctx context.Context, s interfaces.StateInterface, hostname string) (ClientConfigT, error) { 28 | retval := ClientConfigT{} 29 | 30 | // Get all client configs for the current host. 31 | configs, err := database.ClientConfigQuery.GetAllForHost(ctx, s.ClusterState(), hostname) 32 | if err != nil { 33 | return ClientConfigT{}, fmt.Errorf("could not query database for client configs: %v", err) 34 | } 35 | 36 | setterTable := GetClientConfigSet() 37 | for _, config := range configs { 38 | // Populate client config table using the database values. 39 | err = setFieldValue(&retval, fmt.Sprint(setterTable[config.Key]), config.Value) 40 | if err != nil { 41 | return ClientConfigT{}, fmt.Errorf("failed object population: %v", err) 42 | } 43 | } 44 | 45 | return retval, nil 46 | } 47 | 48 | // setFieldValue populates the individual client configuration values into ClientConfigT object fields. 49 | func setFieldValue(ogp *ClientConfigT, field string, value string) error { 50 | r := reflect.ValueOf(ogp) 51 | f := reflect.Indirect(r).FieldByName(field) 52 | if f.Kind() != reflect.Invalid { 53 | f.SetString(value) 54 | return nil 55 | } 56 | return fmt.Errorf("cannot set field %s", field) 57 | } 58 | 59 | // GetClientConfigSet provides the mapping between client config key and fieldname for population through reflection. 
60 | func GetClientConfigSet() common.Set { 61 | return common.Set{ 62 | "rbd_cache": "IsCache", 63 | "rbd_cache_size": "CacheSize", 64 | "rbd_cache_writethrough_until_flush": "IsCacheWritethrough", 65 | "rbd_cache_max_dirty": "CacheMaxDirty", 66 | "rbd_cache_target_dirty": "CacheTargetDirty", 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /microceph/ceph/client_config_test.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "reflect" 7 | "testing" 8 | 9 | "github.com/canonical/lxd/shared/api" 10 | "github.com/canonical/microceph/microceph/tests" 11 | "github.com/canonical/microcluster/v2/state" 12 | 13 | "github.com/canonical/microceph/microceph/database" 14 | "github.com/canonical/microceph/microceph/mocks" 15 | "github.com/stretchr/testify/assert" 16 | "github.com/stretchr/testify/suite" 17 | ) 18 | 19 | type ClientConfigSuite struct { 20 | tests.BaseSuite 21 | TestStateInterface *mocks.StateInterface 22 | } 23 | 24 | func TestClientConfig(t *testing.T) { 25 | suite.Run(t, new(ClientConfigSuite)) 26 | } 27 | 28 | func (ccs *ClientConfigSuite) SetupTest() { 29 | ccs.BaseSuite.SetupTest() 30 | 31 | ccs.TestStateInterface = mocks.NewStateInterface(ccs.T()) 32 | u := api.NewURL() 33 | state := &mocks.MockState{ 34 | URL: u, 35 | ClusterName: "foohost", 36 | } 37 | 38 | ccs.TestStateInterface.On("ClusterState").Return(state) 39 | } 40 | 41 | func addGetHostConfigsExpectation(mci *mocks.ClientConfigQueryIntf, cs state.State, hostname string) { 42 | output := database.ClientConfigItems{} 43 | count := 0 44 | for configKey, field := range GetClientConfigSet() { 45 | count++ 46 | output = append(output, database.ClientConfigItem{ 47 | ID: count, 48 | Host: hostname, 49 | Key: configKey, 50 | Value: fmt.Sprintf("%v", field), 51 | }) 52 | } 53 | 54 | mci.On("GetAllForHost", cs, hostname).Return(output, nil) 55 | } 56 | 57 | 
func (ccs *ClientConfigSuite) TestFetchHostConfig() { 58 | hostname := "testHostname" 59 | 60 | // Mock Client config query interface. 61 | ccq := mocks.NewClientConfigQueryIntf(ccs.T()) 62 | addGetHostConfigsExpectation(ccq, ccs.TestStateInterface.ClusterState(), hostname) 63 | database.ClientConfigQuery = ccq 64 | 65 | configs, err := GetClientConfigForHost(context.Background(), ccs.TestStateInterface, hostname) 66 | assert.NoError(ccs.T(), err) 67 | 68 | // check fields 69 | metaConfigs := reflect.ValueOf(configs) 70 | for i := 0; i < metaConfigs.NumField(); i++ { 71 | assert.Equal(ccs.T(), metaConfigs.Field(i).Interface(), metaConfigs.Type().Field(i).Name) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /microceph/ceph/keyring_test.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/canonical/microceph/microceph/mocks" 7 | "github.com/canonical/microceph/microceph/tests" 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/suite" 10 | ) 11 | 12 | type KeyringSuite struct { 13 | tests.BaseSuite 14 | TestStateInterface *mocks.StateInterface 15 | } 16 | 17 | func TestKeyring(t *testing.T) { 18 | suite.Run(t, new(KeyringSuite)) 19 | } 20 | 21 | func (ks *KeyringSuite) SetupTest() { 22 | ks.BaseSuite.SetupTest() 23 | ks.CopyCephConfigs() 24 | } 25 | 26 | func (ks *KeyringSuite) TestClientKeyringCreation() { 27 | r := mocks.NewRunner(ks.T()) 28 | 29 | // mocks and expectations 30 | r.On("RunCommand", []interface{}{ 31 | "ceph", "auth", "get-or-create", "client.RemoteName"}...).Return("ok", nil).Once() 32 | r.On("RunCommand", []interface{}{ 33 | "ceph", "auth", "print-key", "client.RemoteName"}...).Return("ABCD", nil).Once() 34 | processExec = r 35 | 36 | // Method call 37 | clientKey, err := CreateClientKey("RemoteName") 38 | 39 | assert.NoError(ks.T(), err) 40 | assert.Equal(ks.T(), clientKey, 
"ABCD") 41 | } 42 | 43 | func (ks *KeyringSuite) TestClientKeyringDelete() { 44 | r := mocks.NewRunner(ks.T()) 45 | 46 | // mocks and expectations 47 | r.On("RunCommand", []interface{}{ 48 | "ceph", "auth", "del", "client.RemoteName"}...).Return("ok", nil).Once() 49 | processExec = r 50 | 51 | // Method call 52 | err := DeleteClientKey("RemoteName") 53 | 54 | assert.NoError(ks.T(), err) 55 | } 56 | -------------------------------------------------------------------------------- /microceph/ceph/log.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "unsafe" 7 | 8 | "github.com/canonical/lxd/shared/logger" 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | // The following is hack to access a private member in lxd's logger. 13 | 14 | type targetLogger interface { 15 | Panic(args ...interface{}) 16 | Fatal(args ...interface{}) 17 | Error(args ...interface{}) 18 | Warn(args ...interface{}) 19 | Info(args ...interface{}) 20 | Debug(args ...interface{}) 21 | Trace(args ...interface{}) 22 | WithFields(fields logrus.Fields) *logrus.Entry 23 | } 24 | 25 | type logWrapper struct { 26 | target targetLogger 27 | } 28 | 29 | func SetLogLevel(level string) error { 30 | lrLevel, err := logrus.ParseLevel(level) 31 | if err != nil { 32 | // Has to be an integer level. 
33 | ilvl, err := strconv.Atoi(level) 34 | if err != nil { 35 | return err 36 | } else if ilvl < 0 || ilvl > int(logrus.TraceLevel) { 37 | return fmt.Errorf("invalid log level: %v", ilvl) 38 | } 39 | 40 | lrLevel = logrus.Level(ilvl) 41 | } 42 | 43 | wrapper := (*logWrapper)(unsafe.Pointer(&logger.Log)) 44 | target := (*logrus.Logger)(unsafe.Pointer(&wrapper.target)) 45 | target.SetLevel(lrLevel) 46 | return nil 47 | } 48 | 49 | func GetLogLevel() uint32 { 50 | wrapper := (*logWrapper)(unsafe.Pointer(&logger.Log)) 51 | target := (*logrus.Logger)(unsafe.Pointer(&wrapper.target)) 52 | return uint32(target.GetLevel()) 53 | } 54 | -------------------------------------------------------------------------------- /microceph/ceph/manager.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | ) 7 | 8 | func bootstrapMgr(hostname string, path string) error { 9 | args := []string{ 10 | "auth", 11 | "get-or-create", 12 | fmt.Sprintf("mgr.%s", hostname), 13 | "mon", "allow profile mgr", 14 | "osd", "allow *", 15 | "mds", "allow *", 16 | "-o", filepath.Join(path, "keyring"), 17 | } 18 | 19 | _, err := cephRun(args...) 20 | if err != nil { 21 | return err 22 | } 23 | 24 | return nil 25 | } 26 | -------------------------------------------------------------------------------- /microceph/ceph/metadata.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | ) 7 | 8 | func bootstrapMds(hostname string, path string) error { 9 | args := []string{ 10 | "auth", 11 | "get-or-create", 12 | fmt.Sprintf("mds.%s", hostname), 13 | "mon", "allow profile mds", 14 | "mgr", "allow profile mds", 15 | "mds", "allow *", 16 | "osd", "allow *", 17 | "-o", filepath.Join(path, "keyring"), 18 | } 19 | 20 | _, err := cephRun(args...) 
21 | if err != nil { 22 | return err 23 | } 24 | 25 | return nil 26 | } 27 | -------------------------------------------------------------------------------- /microceph/ceph/monitor.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "fmt" 5 | "github.com/canonical/lxd/shared/logger" 6 | "os" 7 | "path/filepath" 8 | ) 9 | 10 | func genMonmap(path string, fsid string) error { 11 | args := []string{ 12 | "--create", 13 | "--fsid", fsid, 14 | path, 15 | } 16 | 17 | _, err := processExec.RunCommand("monmaptool", args...) 18 | if err != nil { 19 | return err 20 | } 21 | 22 | return nil 23 | } 24 | 25 | func addMonmap(path string, name string, address string) error { 26 | args := []string{ 27 | "--add", 28 | name, 29 | address, 30 | path, 31 | } 32 | 33 | _, err := processExec.RunCommand("monmaptool", args...) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | return nil 39 | } 40 | 41 | func bootstrapMon(hostname string, path string, monmap string, keyring string) error { 42 | args := []string{ 43 | "--mkfs", 44 | "-i", hostname, 45 | "--mon-data", path, 46 | "--monmap", monmap, 47 | "--keyring", keyring, 48 | } 49 | 50 | _, err := processExec.RunCommand("ceph-mon", args...) 
51 | if err != nil { 52 | return err 53 | } 54 | 55 | return nil 56 | } 57 | 58 | func joinMon(hostname string, path string) error { 59 | tmpPath, err := os.MkdirTemp("", "") 60 | if err != nil { 61 | return fmt.Errorf("unable to create temporary path: %w", err) 62 | } 63 | defer os.RemoveAll(tmpPath) 64 | 65 | monmap := filepath.Join(tmpPath, "mon.map") 66 | _, err = cephRun("mon", "getmap", "-o", monmap) 67 | if err != nil { 68 | return fmt.Errorf("failed to retrieve monmap: %w", err) 69 | } 70 | 71 | keyring := filepath.Join(tmpPath, "mon.keyring") 72 | _, err = cephRun("auth", "get", "mon.", "-o", keyring) 73 | if err != nil { 74 | return fmt.Errorf("failed to retrieve mon keyring: %w", err) 75 | } 76 | 77 | return bootstrapMon(hostname, path, monmap, keyring) 78 | } 79 | 80 | // removeMon removes a monitor from the cluster. 81 | func removeMon(hostname string) error { 82 | _, err := cephRun("mon", "rm", hostname) 83 | if err != nil { 84 | logger.Errorf("failed to remove monitor %q: %v", hostname, err) 85 | return fmt.Errorf("failed to remove monitor %q: %w", hostname, err) 86 | } 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /microceph/ceph/run.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | func cephRun(args ...string) (string, error) { 4 | return processExec.RunCommand("ceph", args...) 5 | } 6 | -------------------------------------------------------------------------------- /microceph/ceph/service_placement_mon.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | 8 | "github.com/canonical/microceph/microceph/interfaces" 9 | 10 | "github.com/canonical/microceph/microceph/database" 11 | ) 12 | 13 | type MonServicePlacement struct { 14 | Name string 15 | } 16 | 17 | // Populate json payload data to the service object. 
18 | func (msp *MonServicePlacement) PopulateParams(s interfaces.StateInterface, payload string) error { 19 | return nil 20 | } 21 | 22 | // Check if host is hospitable to the new service to be enabled. 23 | func (msp *MonServicePlacement) HospitalityCheck(s interfaces.StateInterface) error { 24 | return genericHospitalityCheck(msp.Name) 25 | } 26 | 27 | // Initialise the new service. 28 | func (msp *MonServicePlacement) ServiceInit(ctx context.Context, s interfaces.StateInterface) error { 29 | return genericServiceInit(s, msp.Name) 30 | } 31 | 32 | // Perform Post Placement checks for the service 33 | func (msp *MonServicePlacement) PostPlacementCheck(s interfaces.StateInterface) error { 34 | return genericPostPlacementCheck(msp.Name) 35 | } 36 | 37 | // Perform DB updates to persist the service enablement changes. 38 | func (msp *MonServicePlacement) DbUpdate(ctx context.Context, s interfaces.StateInterface) error { 39 | // Update the database. 40 | err := s.ClusterState().Database().Transaction(ctx, func(ctx context.Context, tx *sql.Tx) error { 41 | // Record the role. 
42 | _, err := database.CreateService(ctx, tx, database.Service{Member: s.ClusterState().Name(), Service: msp.Name}) 43 | if err != nil { 44 | return fmt.Errorf("failed to record role: %w", err) 45 | } 46 | 47 | err = updateDbForMon(s, ctx, tx) 48 | if err != nil { 49 | return fmt.Errorf("failed to record mon host: %w", err) 50 | } 51 | 52 | return nil 53 | }) 54 | if err != nil { 55 | return err 56 | } 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /microceph/ceph/services_placement_rgw.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | 8 | "github.com/canonical/microceph/microceph/interfaces" 9 | ) 10 | 11 | type RgwServicePlacement struct { 12 | Port int 13 | SSLPort int 14 | SSLCertificate string 15 | SSLPrivateKey string 16 | } 17 | 18 | func (rgw *RgwServicePlacement) PopulateParams(s interfaces.StateInterface, payload string) error { 19 | 20 | err := json.Unmarshal([]byte(payload), &rgw) 21 | if err != nil { 22 | return err 23 | } 24 | 25 | return nil 26 | } 27 | 28 | func (rgw *RgwServicePlacement) HospitalityCheck(s interfaces.StateInterface) error { 29 | return genericHospitalityCheck("rgw") 30 | } 31 | 32 | func (rgw *RgwServicePlacement) ServiceInit(ctx context.Context, s interfaces.StateInterface) error { 33 | // fetch configs from db 34 | config, err := GetConfigDb(ctx, s) 35 | if err != nil { 36 | return fmt.Errorf("failed to get config db: %w", err) 37 | } 38 | 39 | return EnableRGW(s, rgw.Port, rgw.SSLPort, rgw.SSLCertificate, rgw.SSLPrivateKey, getMonitorsFromConfig(config)) 40 | } 41 | 42 | func (rgw *RgwServicePlacement) PostPlacementCheck(s interfaces.StateInterface) error { 43 | return genericPostPlacementCheck("rgw") 44 | } 45 | 46 | func (rgw *RgwServicePlacement) DbUpdate(ctx context.Context, s interfaces.StateInterface) error { 47 | return genericDbUpdate(ctx, s, 
"rgw") 48 | } 49 | -------------------------------------------------------------------------------- /microceph/ceph/snap.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/canonical/lxd/shared/logger" 8 | ) 9 | 10 | // Check if a snapd interface is connected to microceph 11 | func isIntfConnected(name string) bool { 12 | args := []string{ 13 | "is-connected", 14 | name, 15 | } 16 | 17 | _, err := processExec.RunCommand("snapctl", args...) 18 | if err != nil { // Non-zero return code when connection not present. 19 | logger.Errorf("Failure: check is-connected %s: %v", name, err) 20 | return false 21 | } 22 | 23 | // 0 return code when connection is present 24 | return true 25 | } 26 | 27 | // snapStart starts a service via snapctl, optionally enabling it. 28 | func snapStart(service string, enable bool) error { 29 | args := []string{ 30 | "start", 31 | fmt.Sprintf("microceph.%s", service), 32 | } 33 | 34 | if enable { 35 | args = append(args, "--enable") 36 | } 37 | 38 | _, err := processExec.RunCommand("snapctl", args...) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | return nil 44 | } 45 | 46 | // snapStop stops a service via snapctl, optionally disabling it. 47 | func snapStop(service string, disable bool) error { 48 | args := []string{ 49 | "stop", 50 | fmt.Sprintf("microceph.%s", service), 51 | } 52 | 53 | if disable { 54 | args = append(args, "--disable") 55 | } 56 | 57 | _, err := processExec.RunCommand("snapctl", args...) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | return nil 63 | } 64 | 65 | // Restarts (optionally reloads) a service via snapctl. 
66 | func snapRestart(service string, isReload bool) error { 67 | args := []string{ 68 | "restart", 69 | } 70 | 71 | if isReload { 72 | args = append(args, "--reload") 73 | } 74 | 75 | args = append(args, fmt.Sprintf("microceph.%s", service)) 76 | 77 | _, err := processExec.RunCommand("snapctl", args...) 78 | if err != nil { 79 | return err 80 | } 81 | 82 | return nil 83 | } 84 | 85 | // Check if a particular snap service is active or inactive 86 | func snapCheckActive(service string) error { 87 | args := []string{ 88 | "services", 89 | fmt.Sprintf("microceph.%s", service), 90 | } 91 | 92 | out, err := processExec.RunCommand("snapctl", args...) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | // Check if the particular service is inactive. 98 | if strings.Contains(out, "inactive") { 99 | return fmt.Errorf("%s service is not active", service) 100 | } 101 | 102 | return nil 103 | } 104 | -------------------------------------------------------------------------------- /microceph/ceph/subprocess.go: -------------------------------------------------------------------------------- 1 | package ceph 2 | 3 | import ( 4 | "context" 5 | "github.com/canonical/lxd/shared" 6 | ) 7 | 8 | // Runner launches processes 9 | type Runner interface { 10 | RunCommand(name string, arg ...string) (string, error) 11 | RunCommandContext(ctx context.Context, name string, arg ...string) (string, error) 12 | } 13 | 14 | // RunnerImpl for launching processes 15 | type RunnerImpl struct{} 16 | 17 | // RunCommand runs a process given a path to a binary and a list of args 18 | func (c RunnerImpl) RunCommand(name string, arg ...string) (string, error) { 19 | return shared.RunCommand(name, arg...) 20 | } 21 | 22 | // RunCommandContext runs a process given a context, a path to a binary and a list of args 23 | func (c RunnerImpl) RunCommandContext(ctx context.Context, name string, arg ...string) (string, error) { 24 | return shared.RunCommandContext(ctx, name, arg...) 
25 | } 26 | 27 | // Singleton runner: make this patch-able for testing purposes. 28 | // By default executes via shared.RunCommand() 29 | var processExec Runner = RunnerImpl{} 30 | -------------------------------------------------------------------------------- /microceph/ceph/test_assets/rbd_mirror_image_status.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "image_one", 3 | "global_id": "c6e4ea14-be4a-41f4-ba76-dbb64595e600", 4 | "state": "up+stopped", 5 | "description": "local image is primary", 6 | "daemon_service": { 7 | "service_id": "14177", 8 | "instance_id": "14221", 9 | "daemon_id": "magical-reindeer", 10 | "hostname": "magical-reindeer" 11 | }, 12 | "last_update": "2024-10-09 07:56:24", 13 | "peer_sites": [ 14 | { 15 | "site_name": "simple", 16 | "mirror_uuids": "84f58bda-4eea-45b1-9a5a-296cf1b82a65", 17 | "state": "up+replaying", 18 | "description": "replaying, {\"bytes_per_second\":0.0,\"entries_behind_primary\":0,\"entries_per_second\":0.0,\"non_primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1},\"primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1}}", 19 | "last_update": "2024-10-09 07:56:28" 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /microceph/ceph/test_assets/rbd_mirror_pool_info.json: -------------------------------------------------------------------------------- 1 | { 2 | "mode": "pool", 3 | "site_name": "magical", 4 | "peers": [ 5 | { 6 | "uuid": "f3ee5939-66a6-494f-849a-a4402ddb4d18", 7 | "direction": "rx-tx", 8 | "site_name": "simple", 9 | "mirror_uuid": "84f58bda-4eea-45b1-9a5a-296cf1b82a65", 10 | "client_name": "client.rbd-mirror-peer" 11 | } 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /microceph/ceph/test_assets/rbd_mirror_pool_status.json: -------------------------------------------------------------------------------- 1 | { 2 
| "summary": { 3 | "health": "OK", 4 | "daemon_health": "OK", 5 | "image_health": "OK", 6 | "states": { 7 | "replaying": 2 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /microceph/ceph/test_assets/rbd_mirror_promote_secondary_failure.txt: -------------------------------------------------------------------------------- 1 | rbd: failed to 2024-10-09T11:00:10.804+0000 7f65a4bce6c0 -1 librbd::mirror::PromoteRequest: 0x7f6588018e20 handle_get_info: image is primary within a remote cluster or demotion is not propagated yet 2 | promote image image_one: (16) Device or resource busy 3 | 2024-10-09T11:00:10.804+0000 7f65a4bce6c0 -1 librbd::io::AioCompletion: 0x7f65980061c0 fail: (16) Device or resource busy 4 | 2024-10-09T11:00:10.808+0000 7f65a4bce6c0 -1 librbd::mirror::PromoteRequest: 0x7f658c008c50 handle_get_info: image is primary within a remote cluster or demotion is not propagated yet 5 | 2024-10-09T11:00:10.808+0000 7f65a4bce6c0 -1 librbd::io::AioCompletion: 0x7f65980061c0 fail: (16) Device or resource busy 6 | rbd: failed to promote image image_two: (16) Device or resource busy 7 | 2024-10-09T11:00:10.812+0000 7f65a53cf6c0 -1 librbd::mirror::PromoteRequest: 0x7f6588018e20 handle_get_info: image is primary within a remote cluster or demotion is not propagated yet 8 | 2024-10-09T11:00:10.812+0000 7f65a53cf6c0 -1 librbd::io::AioCompletion: 0x7f658c0069e0 fail: (16) Device or resource busy 9 | rbd: failed to promote image image_three: (16) Device or resource busy 10 | Promoted 0 mirrored images -------------------------------------------------------------------------------- /microceph/ceph/test_assets/rbd_mirror_verbose_pool_status.json: -------------------------------------------------------------------------------- 1 | { 2 | "summary": { 3 | "health": "OK", 4 | "daemon_health": "OK", 5 | "image_health": "OK", 6 | "states": { 7 | "replaying": 2 8 | } 9 | }, 10 | "daemons": [ 11 | { 12 | "service_id": "14173", 
13 | "instance_id": "14198", 14 | "client_id": "magical-reindeer", 15 | "hostname": "magical-reindeer", 16 | "ceph_version": "19.2.0~git20240301.4c76c50", 17 | "leader": true, 18 | "health": "OK" 19 | } 20 | ], 21 | "images": [ 22 | { 23 | "name": "image_one", 24 | "global_id": "ebbea3fc-78c5-41e7-a796-d2fc59c691c6", 25 | "state": "up+stopped", 26 | "description": "local image is primary", 27 | "daemon_service": { 28 | "service_id": "14173", 29 | "instance_id": "14198", 30 | "daemon_id": "magical-reindeer", 31 | "hostname": "magical-reindeer" 32 | }, 33 | "last_update": "2024-10-09 05:55:27", 34 | "peer_sites": [ 35 | { 36 | "site_name": "simple", 37 | "mirror_uuids": "ced68f5f-f982-4ca2-b823-c68be7b86c93", 38 | "state": "up+replaying", 39 | "description": "replaying, {\"bytes_per_second\":0.0,\"entries_behind_primary\":0,\"entries_per_second\":0.0,\"non_primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1},\"primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1}}", 40 | "last_update": "2024-10-09 05:55:27" 41 | } 42 | ] 43 | }, 44 | { 45 | "name": "image_two", 46 | "global_id": "0f35d44b-60fd-4294-adc9-eb7a65815db9", 47 | "state": "up+stopped", 48 | "description": "local image is primary", 49 | "daemon_service": { 50 | "service_id": "14173", 51 | "instance_id": "14198", 52 | "daemon_id": "magical-reindeer", 53 | "hostname": "magical-reindeer" 54 | }, 55 | "last_update": "2024-10-09 05:55:27", 56 | "peer_sites": [ 57 | { 58 | "site_name": "simple", 59 | "mirror_uuids": "ced68f5f-f982-4ca2-b823-c68be7b86c93", 60 | "state": "up+replaying", 61 | "description": "replaying, {\"bytes_per_second\":0.0,\"entries_behind_primary\":0,\"entries_per_second\":0.0,\"non_primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1},\"primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1}}", 62 | "last_update": "2024-10-09 05:55:27" 63 | } 64 | ] 65 | } 66 | ] 67 | } 68 | 
-------------------------------------------------------------------------------- /microceph/client/cluster.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/canonical/lxd/shared/api" 9 | "github.com/canonical/microceph/microceph/api/types" 10 | microCli "github.com/canonical/microcluster/v2/client" 11 | ) 12 | 13 | func GetClusterToken(ctx context.Context, c *microCli.Client, req types.ClusterExportRequest) (string, error) { 14 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*5) 15 | defer cancel() 16 | 17 | var state string 18 | 19 | err := c.Query(queryCtx, "GET", types.ExtendedPathPrefix, api.NewURL().Path("cluster"), req, &state) 20 | if err != nil { 21 | return "", fmt.Errorf("failed to fetch cluster state: %w", err) 22 | } 23 | 24 | return state, nil 25 | } 26 | -------------------------------------------------------------------------------- /microceph/client/configs.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/canonical/lxd/shared/api" 9 | "github.com/canonical/microceph/microceph/api/types" 10 | microCli "github.com/canonical/microcluster/v2/client" 11 | ) 12 | 13 | func SetConfig(ctx context.Context, c *microCli.Client, data *types.Config) error { 14 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*200) 15 | defer cancel() 16 | 17 | err := c.Query(queryCtx, "PUT", types.ExtendedPathPrefix, api.NewURL().Path("configs"), data, nil) 18 | if err != nil { 19 | return fmt.Errorf("failed setting cluster config: %w, Key: %s, Value: %s", err, data.Key, data.Value) 20 | } 21 | 22 | return nil 23 | } 24 | 25 | func ClearConfig(ctx context.Context, c *microCli.Client, data *types.Config) error { 26 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*200) 27 | defer cancel() 28 | 29 | err 
:= c.Query(queryCtx, "DELETE", types.ExtendedPathPrefix, api.NewURL().Path("configs"), data, nil) 30 | if err != nil { 31 | return fmt.Errorf("failed clearing cluster config: %w, Key: %s", err, data.Key) 32 | } 33 | 34 | return nil 35 | } 36 | 37 | func GetConfig(ctx context.Context, c *microCli.Client, data *types.Config) (types.Configs, error) { 38 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*5) 39 | defer cancel() 40 | 41 | configs := types.Configs{} 42 | 43 | err := c.Query(queryCtx, "GET", types.ExtendedPathPrefix, api.NewURL().Path("configs"), data, &configs) 44 | if err != nil { 45 | return nil, fmt.Errorf("failed to fetch cluster config: %w, Key: %s", err, data.Key) 46 | } 47 | 48 | return configs, nil 49 | } 50 | -------------------------------------------------------------------------------- /microceph/client/log.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/canonical/lxd/shared/api" 9 | microCli "github.com/canonical/microcluster/v2/client" 10 | 11 | "github.com/canonical/microceph/microceph/api/types" 12 | ) 13 | 14 | func LogLevelSet(ctx context.Context, c *microCli.Client, data *types.LogLevelPut) error { 15 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*120) 16 | defer cancel() 17 | 18 | err := c.Query(queryCtx, "PUT", types.ExtendedPathPrefix, api.NewURL().Path("microceph", "configs", "log-level"), data, nil) 19 | if err != nil { 20 | return fmt.Errorf("failed setting log level: %w", err) 21 | } 22 | 23 | return nil 24 | } 25 | 26 | func LogLevelGet(ctx context.Context, c *microCli.Client) (uint32, error) { 27 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*5) 28 | defer cancel() 29 | 30 | level := uint32(0) 31 | 32 | err := c.Query(queryCtx, "GET", types.ExtendedPathPrefix, api.NewURL().Path("microceph", "configs", "log-level"), nil, &level) 33 | if err != nil { 34 | return 0, 
fmt.Errorf("failed getting log level: %w", err) 35 | } 36 | 37 | return level, nil 38 | } 39 | -------------------------------------------------------------------------------- /microceph/client/pool.go: -------------------------------------------------------------------------------- 1 | // Package client provides a full Go API client. 2 | package client 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/canonical/lxd/shared/api" 10 | microCli "github.com/canonical/microcluster/v2/client" 11 | 12 | "github.com/canonical/microceph/microceph/api/types" 13 | ) 14 | 15 | func PoolSetReplicationFactor(ctx context.Context, c *microCli.Client, data *types.PoolPut) error { 16 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*120) 17 | defer cancel() 18 | 19 | err := c.Query(queryCtx, "PUT", types.ExtendedPathPrefix, api.NewURL().Path("pools-op"), data, nil) 20 | if err != nil { 21 | return fmt.Errorf("failed setting replication factor: %w", err) 22 | } 23 | 24 | return nil 25 | } 26 | 27 | func GetPools(ctx context.Context, c *microCli.Client) ([]types.Pool, error) { 28 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*120) 29 | defer cancel() 30 | 31 | var pools []types.Pool 32 | err := c.Query(queryCtx, "GET", types.ExtendedPathPrefix, api.NewURL().Path("pools"), nil, &pools) 33 | if err != nil { 34 | return nil, fmt.Errorf("failed to fetch OSD pools: %w", err) 35 | } 36 | 37 | return pools, nil 38 | 39 | } 40 | -------------------------------------------------------------------------------- /microceph/client/replication.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/canonical/lxd/shared/api" 9 | "github.com/canonical/microceph/microceph/api/types" 10 | microCli "github.com/canonical/microcluster/v2/client" 11 | ) 12 | 13 | // Sends replication request for creating, deleting, getting, and listing remote 
replication. 14 | func SendReplicationRequest(ctx context.Context, c *microCli.Client, data types.ReplicationRequest) (string, error) { 15 | var err error 16 | var resp string 17 | queryCtx, cancel := context.WithTimeout(ctx, time.Second*120) 18 | defer cancel() 19 | 20 | // If no API object provided, create API request to the root endpoint. 21 | if len(data.GetAPIObjectId()) == 0 { 22 | // uses replication/$workload endpoint 23 | err = c.Query( 24 | queryCtx, data.GetAPIRequestType(), types.ExtendedPathPrefix, 25 | api.NewURL().Path("ops", "replication", string(data.GetWorkloadType())), 26 | data, &resp, 27 | ) 28 | } else { 29 | // Other requests use replication/$workload/$resource endpoint 30 | err = c.Query( 31 | queryCtx, data.GetAPIRequestType(), types.ExtendedPathPrefix, 32 | api.NewURL().Path("ops", "replication", string(data.GetWorkloadType()), data.GetAPIObjectId()), 33 | data, &resp, 34 | ) 35 | } 36 | if err != nil { 37 | return "", fmt.Errorf("failed to process %s request for %s: %w", data.GetWorkloadRequestType(), data.GetWorkloadType(), err) 38 | } 39 | 40 | return resp, nil 41 | } 42 | -------------------------------------------------------------------------------- /microceph/client/wrap.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "github.com/canonical/microceph/microceph/api/types" 6 | 7 | microCli "github.com/canonical/microcluster/v2/client" 8 | ) 9 | 10 | // ClientInterface wraps client functions 11 | // This is useful for mocking in unit tests 12 | type ClientInterface interface { 13 | GetClusterMembers(*microCli.Client) ([]string, error) 14 | GetDisks(*microCli.Client) (types.Disks, error) 15 | GetServices(*microCli.Client) (types.Services, error) 16 | DeleteService(*microCli.Client, string, string) error 17 | DeleteClusterMember(*microCli.Client, string, bool) error 18 | } 19 | 20 | type ClientImpl struct{} 21 | 22 | // GetClusterMembers gets the 
cluster member names 23 | // We return names only here because the Member type is internal to microclient 24 | func (c ClientImpl) GetClusterMembers(cli *microCli.Client) ([]string, error) { 25 | memberNames := make([]string, 3) 26 | members, err := cli.GetClusterMembers(context.Background()) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | for _, member := range members { 32 | memberNames = append(memberNames, member.Name) 33 | } 34 | 35 | return memberNames, nil 36 | } 37 | 38 | // GetDisks wraps the GetDisks function above 39 | func (c ClientImpl) GetDisks(cli *microCli.Client) (types.Disks, error) { 40 | return GetDisks(context.Background(), cli) 41 | } 42 | 43 | // GetServices wraps the GetServices function above 44 | func (c ClientImpl) GetServices(cli *microCli.Client) (types.Services, error) { 45 | return GetServices(context.Background(), cli) 46 | } 47 | 48 | // DeleteService wraps the DeleteService function 49 | func (c ClientImpl) DeleteService(cli *microCli.Client, target string, service string) error { 50 | return DeleteService(context.Background(), cli, target, service) 51 | } 52 | 53 | // DeleteClusterMember wraps the DeleteClusterMember function 54 | func (c ClientImpl) DeleteClusterMember(cli *microCli.Client, name string, force bool) error { 55 | return cli.DeleteClusterMember(context.Background(), name, force) 56 | } 57 | 58 | // mocking point for unit tests 59 | var MClient ClientInterface = ClientImpl{} 60 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdClient struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdClient) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "client", 14 | Short: "Manage the MicroCeph clients", 15 | } 16 | 17 | // Config Subcommand 18 | clientConfigCmd 
:= cmdClientConfig{common: c.common, client: c} 19 | cmd.AddCommand(clientConfigCmd.Command()) 20 | 21 | // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 22 | cmd.Args = cobra.NoArgs 23 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 24 | 25 | return cmd 26 | } 27 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/client_config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdClientConfig struct { 8 | common *CmdControl 9 | client *cmdClient 10 | } 11 | 12 | func (c *cmdClientConfig) Command() *cobra.Command { 13 | cmd := &cobra.Command{ 14 | Use: "config", 15 | Short: "Manage Ceph Client configs", 16 | } 17 | 18 | // Get 19 | clientConfigGetCmd := cmdClientConfigGet{common: c.common, client: c.client, clientConfig: c} 20 | cmd.AddCommand(clientConfigGetCmd.Command()) 21 | 22 | // Set 23 | clientConfigSetCmd := cmdClientConfigSet{common: c.common, client: c.client, clientConfig: c} 24 | cmd.AddCommand(clientConfigSetCmd.Command()) 25 | 26 | // Reset 27 | clientConfigResetCmd := cmdClientConfigReset{common: c.common, client: c.client, clientConfig: c} 28 | cmd.AddCommand(clientConfigResetCmd.Command()) 29 | 30 | // List 31 | clientConfigListCmd := cmdClientConfigList{common: c.common, client: c.client, clientConfig: c} 32 | cmd.AddCommand(clientConfigListCmd.Command()) 33 | 34 | // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 35 | cmd.Args = cobra.NoArgs 36 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 37 | 38 | return cmd 39 | } 40 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/client_config_get.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | lxdCmd "github.com/canonical/lxd/shared/cmd" 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | 11 | "github.com/canonical/microceph/microceph/api/types" 12 | "github.com/canonical/microceph/microceph/ceph" 13 | "github.com/canonical/microceph/microceph/client" 14 | ) 15 | 16 | type cmdClientConfigGet struct { 17 | common *CmdControl 18 | client *cmdClient 19 | clientConfig *cmdClientConfig 20 | 21 | flagHost string 22 | } 23 | 24 | func (c *cmdClientConfigGet) Command() *cobra.Command { 25 | cmd := &cobra.Command{ 26 | Use: "get ", 27 | Short: "Fetches specified Ceph Client config", 28 | RunE: c.Run, 29 | } 30 | 31 | // * stands for global configs, hence all configs are global by default unless specified. 32 | cmd.Flags().StringVar(&c.flagHost, "target", "*", "Specify a microceph node the provided config should be applied to.") 33 | return cmd 34 | } 35 | 36 | func (c *cmdClientConfigGet) Run(cmd *cobra.Command, args []string) error { 37 | allowList := ceph.GetClientConfigSet() 38 | 39 | // Get can be called with a single key. 40 | if len(args) != 1 { 41 | return cmd.Help() 42 | } 43 | 44 | _, ok := allowList[args[0]] 45 | if !ok { 46 | return fmt.Errorf("key %s is invalid. 
\nSupported Keys: %v", args[0], allowList.Keys()) 47 | } 48 | 49 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 50 | if err != nil { 51 | return fmt.Errorf("unable to configure MicroCeph: %w", err) 52 | } 53 | 54 | cli, err := m.LocalClient() 55 | if err != nil { 56 | return err 57 | } 58 | 59 | req := &types.ClientConfig{ 60 | Key: args[0], 61 | Host: c.flagHost, 62 | } 63 | 64 | configs, err := client.GetClientConfig(context.Background(), cli, req) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | data := make([][]string, len(configs)) 70 | for i, config := range configs { 71 | data[i] = []string{fmt.Sprintf("%d", i), config.Key, config.Value, config.Host} 72 | } 73 | 74 | header := []string{"#", "Key", "Value", "Host"} 75 | err = lxdCmd.RenderTable(lxdCmd.TableFormatTable, header, data, configs) 76 | if err != nil { 77 | return err 78 | } 79 | 80 | return nil 81 | } 82 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/client_config_list.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | lxdCmd "github.com/canonical/lxd/shared/cmd" 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | 11 | "github.com/canonical/microceph/microceph/api/types" 12 | "github.com/canonical/microceph/microceph/client" 13 | ) 14 | 15 | type cmdClientConfigList struct { 16 | common *CmdControl 17 | client *cmdClient 18 | clientConfig *cmdClientConfig 19 | 20 | flagHost string 21 | } 22 | 23 | func (c *cmdClientConfigList) Command() *cobra.Command { 24 | cmd := &cobra.Command{ 25 | Use: "list", 26 | Short: "Lists all configured Ceph Client configs", 27 | RunE: c.Run, 28 | } 29 | 30 | // * stands for global configs, hence all configs are global by default unless specifies. 
31 | cmd.Flags().StringVar(&c.flagHost, "target", "*", "Specify a microceph node the provided config should be applied to.") 32 | return cmd 33 | } 34 | 35 | func (c *cmdClientConfigList) Run(cmd *cobra.Command, args []string) error { 36 | if len(args) != 0 { 37 | return cmd.Help() 38 | } 39 | 40 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 41 | if err != nil { 42 | return fmt.Errorf("unable to configure MicroCeph: %w", err) 43 | } 44 | 45 | cli, err := m.LocalClient() 46 | if err != nil { 47 | return err 48 | } 49 | 50 | req := &types.ClientConfig{ 51 | Host: c.flagHost, 52 | } 53 | 54 | configs, err := client.ListClientConfig(context.Background(), cli, req) 55 | if err != nil { 56 | return err 57 | } 58 | 59 | data := make([][]string, len(configs)) 60 | for i, config := range configs { 61 | data[i] = []string{fmt.Sprintf("%d", i), config.Key, config.Value, config.Host} 62 | } 63 | 64 | header := []string{"#", "Key", "Value", "Host"} 65 | err = lxdCmd.RenderTable(lxdCmd.TableFormatTable, header, data, configs) 66 | if err != nil { 67 | return err 68 | } 69 | 70 | return nil 71 | } 72 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/client_config_reset.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | 10 | "github.com/canonical/microceph/microceph/api/types" 11 | "github.com/canonical/microceph/microceph/ceph" 12 | "github.com/canonical/microceph/microceph/client" 13 | "github.com/canonical/microceph/microceph/constants" 14 | ) 15 | 16 | type cmdClientConfigReset struct { 17 | common *CmdControl 18 | client *cmdClient 19 | clientConfig *cmdClientConfig 20 | 21 | flagWait bool 22 | flagHost string 23 | flagForce bool 24 | } 25 | 26 | func (c *cmdClientConfigReset) Command() *cobra.Command { 
27 | cmd := &cobra.Command{ 28 | Use: "reset ", 29 | Short: "Removes specified Ceph Client configs", 30 | RunE: c.Run, 31 | } 32 | 33 | cmd.Flags().BoolVar(&c.flagWait, "wait", true, "Wait for required ceph services to restart post config reset.") 34 | // * stands for global configs, hence all configs are global by default unless specifies. 35 | cmd.Flags().StringVar(&c.flagHost, "target", "*", "Specify a microceph node the provided config should be applied to.") 36 | cmd.Flags().BoolVar(&c.flagForce, "yes-i-really-mean-it", false, "Force microceph to reset all client configs records for given key.") 37 | return cmd 38 | } 39 | 40 | func (c *cmdClientConfigReset) Run(cmd *cobra.Command, args []string) error { 41 | allowList := ceph.GetClientConfigSet() 42 | if len(args) != 1 { 43 | return cmd.Help() 44 | } 45 | 46 | _, ok := allowList[args[0]] 47 | if !ok { 48 | return fmt.Errorf("resetting key %s is not supported.\nSupported Keys: %v", args[0], allowList.Keys()) 49 | } 50 | 51 | if !c.flagForce { 52 | return fmt.Errorf("WARNING: this will *PERMANENTLY REMOVE* all records of the %s key. 
%s", 53 | args[0], constants.CliForcePrompt) 54 | } 55 | 56 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 57 | if err != nil { 58 | return fmt.Errorf("unable to configure MicroCeph: %w", err) 59 | } 60 | 61 | cli, err := m.LocalClient() 62 | if err != nil { 63 | return err 64 | } 65 | 66 | req := &types.ClientConfig{ 67 | Key: args[0], 68 | Wait: c.flagWait, 69 | Host: c.flagHost, 70 | } 71 | 72 | err = client.ResetClientConfig(context.Background(), cli, req) 73 | if err != nil { 74 | return err 75 | } 76 | 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/client_config_set.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | 10 | "github.com/canonical/microceph/microceph/api/types" 11 | "github.com/canonical/microceph/microceph/ceph" 12 | "github.com/canonical/microceph/microceph/client" 13 | ) 14 | 15 | type cmdClientConfigSet struct { 16 | common *CmdControl 17 | client *cmdClient 18 | clientConfig *cmdClientConfig 19 | 20 | flagWait bool 21 | flagHost string 22 | } 23 | 24 | func (c *cmdClientConfigSet) Command() *cobra.Command { 25 | cmd := &cobra.Command{ 26 | Use: "set ", 27 | Short: "Sets specified Ceph Client config", 28 | RunE: c.Run, 29 | } 30 | 31 | cmd.Flags().BoolVar(&c.flagWait, "wait", true, "Wait for configs to propagate across the cluster.") 32 | // * stands for global configs, hence all configs are global by default unless specifies. 
33 | cmd.Flags().StringVar(&c.flagHost, "target", "*", "Specify a microceph node the provided config should be applied to.") 34 | return cmd 35 | } 36 | 37 | func (c *cmdClientConfigSet) Run(cmd *cobra.Command, args []string) error { 38 | allowList := ceph.GetClientConfigSet() 39 | if len(args) != 2 { 40 | return cmd.Help() 41 | } 42 | 43 | _, ok := allowList[args[0]] 44 | if !ok { 45 | return fmt.Errorf("configuring key %s is not supported.\nSupported Keys: %v", args[0], allowList.Keys()) 46 | } 47 | 48 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 49 | if err != nil { 50 | return fmt.Errorf("unable to configure MicroCeph: %w", err) 51 | } 52 | 53 | cli, err := m.LocalClient() 54 | if err != nil { 55 | return err 56 | } 57 | 58 | req := &types.ClientConfig{ 59 | Key: args[0], 60 | Value: args[1], 61 | Wait: c.flagWait, 62 | Host: c.flagHost, 63 | } 64 | 65 | err = client.SetClientConfig(context.Background(), cli, req) 66 | if err != nil { 67 | return err 68 | } 69 | 70 | return nil 71 | } 72 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdCluster struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdCluster) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "cluster", 14 | Short: "Manage the MicroCeph cluster", 15 | } 16 | 17 | // Add 18 | clusterAddCmd := cmdClusterAdd{common: c.common, cluster: c} 19 | cmd.AddCommand(clusterAddCmd.Command()) 20 | 21 | // Bootstrap 22 | clusterBootstrapCmd := cmdClusterBootstrap{common: c.common, cluster: c} 23 | cmd.AddCommand(clusterBootstrapCmd.Command()) 24 | 25 | // Join 26 | clusterJoinCmd := cmdClusterJoin{common: c.common, cluster: c} 27 | cmd.AddCommand(clusterJoinCmd.Command()) 28 | 29 | // List 30 | clusterListCmd := 
cmdClusterList{common: c.common, cluster: c} 31 | cmd.AddCommand(clusterListCmd.Command()) 32 | 33 | // Remove 34 | clusterRemoveCmd := cmdClusterRemove{common: c.common, cluster: c} 35 | cmd.AddCommand(clusterRemoveCmd.Command()) 36 | 37 | // SQL 38 | clusterSQLCmd := cmdClusterSQL{common: c.common, cluster: c} 39 | cmd.AddCommand(clusterSQLCmd.Command()) 40 | 41 | // Export 42 | clusterExportCmd := cmdClusterExport{common: c.common, cluster: c} 43 | cmd.AddCommand(clusterExportCmd.Command()) 44 | 45 | // Config Subcommand 46 | clusterConfigCmd := cmdClusterConfig{common: c.common, cluster: c} 47 | cmd.AddCommand(clusterConfigCmd.Command()) 48 | 49 | // Migrate Subcommand 50 | clusterMigrateCmd := cmdClusterMigrate{common: c.common, cluster: c} 51 | cmd.AddCommand(clusterMigrateCmd.Command()) 52 | 53 | // Maintenance Subcommand 54 | clusterMaintenance := cmdClusterMaintenance{common: c.common} 55 | cmd.AddCommand(clusterMaintenance.Command()) 56 | 57 | // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 58 | cmd.Args = cobra.NoArgs 59 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 60 | 61 | return cmd 62 | } 63 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_add.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | type cmdClusterAdd struct { 13 | common *CmdControl 14 | cluster *cmdCluster 15 | 16 | flagTokenDuration string 17 | } 18 | 19 | func (c *cmdClusterAdd) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "add ", 22 | Short: "Generates a token for a new server", 23 | RunE: c.Run, 24 | } 25 | 26 | cmd.Flags().StringVarP(&c.flagTokenDuration, "timeout", "t", "3h", "Set the lifetime for the token. 
Default is 3 hours. (eg. 10s, 5m, 3h)") 27 | 28 | return cmd 29 | } 30 | 31 | func (c *cmdClusterAdd) Run(cmd *cobra.Command, args []string) error { 32 | if len(args) != 1 { 33 | return cmd.Help() 34 | } 35 | 36 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | expireAfter, err := time.ParseDuration(c.flagTokenDuration) 42 | if err != nil { 43 | return fmt.Errorf("Invalid value for timeout flag: %w", err) 44 | } 45 | 46 | token, err := m.NewJoinToken(context.Background(), args[0], expireAfter) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | fmt.Println(token) 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdClusterConfig struct { 8 | common *CmdControl 9 | cluster *cmdCluster 10 | } 11 | 12 | func (c *cmdClusterConfig) Command() *cobra.Command { 13 | cmd := &cobra.Command{ 14 | Use: "config", 15 | Short: "Manage Ceph Cluster configs", 16 | } 17 | 18 | // Get 19 | clusterConfigGetCmd := cmdClusterConfigGet{common: c.common, cluster: c.cluster, clusterConfig: c} 20 | cmd.AddCommand(clusterConfigGetCmd.Command()) 21 | 22 | // Set 23 | clusterConfigSetCmd := cmdClusterConfigSet{common: c.common, cluster: c.cluster, clusterConfig: c} 24 | cmd.AddCommand(clusterConfigSetCmd.Command()) 25 | 26 | // Reset 27 | clusterConfigResetCmd := cmdClusterConfigReset{common: c.common, cluster: c.cluster, clusterConfig: c} 28 | cmd.AddCommand(clusterConfigResetCmd.Command()) 29 | 30 | // List 31 | clusterConfigListCmd := cmdClusterConfigList{common: c.common, cluster: c.cluster, clusterConfig: c} 32 | cmd.AddCommand(clusterConfigListCmd.Command()) 33 | 34 | // Workaround for subcommand usage errors. 
// Command builds the "cluster config get" subcommand; it takes a single
// key argument and delegates execution to Run.
// NOTE(review): the Use string appears truncated in this view ("get ");
// it is likely "get <key>" — confirm against upstream before relying on it.
func (c *cmdClusterConfigGet) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "get ",
		Short: "Get specified Ceph Cluster config",
		RunE:  c.Run,
	}

	return cmd
}
33 | if len(args) != 1 { 34 | return cmd.Help() 35 | } 36 | 37 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 38 | if err != nil { 39 | return fmt.Errorf("unable to configure MicroCeph: %w", err) 40 | } 41 | 42 | cli, err := m.LocalClient() 43 | if err != nil { 44 | return err 45 | } 46 | 47 | req := &types.Config{ 48 | Key: args[0], 49 | } 50 | 51 | configs, err := client.GetConfig(context.Background(), cli, req) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | data := make([][]string, len(configs)) 57 | for i, config := range configs { 58 | data[i] = []string{fmt.Sprintf("%d", i), config.Key, config.Value} 59 | } 60 | 61 | header := []string{"#", "Key", "Value"} 62 | err = lxdCmd.RenderTable(lxdCmd.TableFormatTable, header, data, configs) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | return nil 68 | } 69 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_config_list.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | lxdCmd "github.com/canonical/lxd/shared/cmd" 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | 11 | "github.com/canonical/microceph/microceph/api/types" 12 | "github.com/canonical/microceph/microceph/client" 13 | ) 14 | 15 | type cmdClusterConfigList struct { 16 | common *CmdControl 17 | cluster *cmdCluster 18 | clusterConfig *cmdClusterConfig 19 | } 20 | 21 | func (c *cmdClusterConfigList) Command() *cobra.Command { 22 | cmd := &cobra.Command{ 23 | Use: "list", 24 | Short: "List all set Ceph level configs", 25 | RunE: c.Run, 26 | } 27 | 28 | return cmd 29 | } 30 | 31 | func (c *cmdClusterConfigList) Run(cmd *cobra.Command, args []string) error { 32 | if len(args) != 0 { 33 | return cmd.Help() 34 | } 35 | 36 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 37 | if err != 
nil { 38 | return fmt.Errorf("Unable to configure MicroCeph: %w", err) 39 | } 40 | 41 | cli, err := m.LocalClient() 42 | if err != nil { 43 | return err 44 | } 45 | 46 | // Create an empty Key request. 47 | req := &types.Config{ 48 | Key: "", 49 | } 50 | 51 | configs, err := client.GetConfig(context.Background(), cli, req) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | data := make([][]string, len(configs)) 57 | for i, config := range configs { 58 | data[i] = []string{fmt.Sprintf("%d", i), config.Key, config.Value} 59 | } 60 | 61 | header := []string{"#", "Key", "Value"} 62 | err = lxdCmd.RenderTable(lxdCmd.TableFormatTable, header, data, configs) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | return nil 68 | } 69 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_config_reset.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | 10 | "github.com/canonical/microceph/microceph/api/types" 11 | "github.com/canonical/microceph/microceph/client" 12 | ) 13 | 14 | type cmdClusterConfigReset struct { 15 | common *CmdControl 16 | cluster *cmdCluster 17 | clusterConfig *cmdClusterConfig 18 | 19 | flagWait bool 20 | flagSkipRestart bool 21 | } 22 | 23 | func (c *cmdClusterConfigReset) Command() *cobra.Command { 24 | cmd := &cobra.Command{ 25 | Use: "reset ", 26 | Short: "Clear specified Ceph Cluster config", 27 | RunE: c.Run, 28 | } 29 | 30 | cmd.Flags().BoolVar(&c.flagWait, "wait", false, "Wait for required ceph services to restart post config reset.") 31 | cmd.Flags().BoolVar(&c.flagSkipRestart, "skip-restart", false, "Don't perform the daemon restart for current config.") 32 | return cmd 33 | } 34 | 35 | func (c *cmdClusterConfigReset) Run(cmd *cobra.Command, args []string) error { 36 | if len(args) != 1 { 37 | 
// Command builds the "cluster config set" subcommand, wiring the --wait and
// --skip-restart flags to the receiver's fields and delegating to Run.
// NOTE(review): the Use string appears truncated in this view ("set ");
// it is likely "set <key> <value>" — confirm against upstream.
func (c *cmdClusterConfigSet) Command() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "set ",
		Short: "Set specified Ceph Cluster config",
		RunE:  c.Run,
	}

	cmd.Flags().BoolVar(&c.flagWait, "wait", false, "Wait for required ceph services to restart post config set.")
	cmd.Flags().BoolVar(&c.flagSkipRestart, "skip-restart", false, "Don't perform the daemon restart for current config.")
	return cmd
}
44 | 45 | cli, err := m.LocalClient() 46 | if err != nil { 47 | return err 48 | } 49 | 50 | req := &types.Config{ 51 | Key: args[0], 52 | Value: args[1], 53 | Wait: c.flagWait, 54 | SkipRestart: c.flagSkipRestart, 55 | } 56 | 57 | err = client.SetConfig(context.Background(), cli, req) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | return nil 63 | } 64 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_export.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | 7 | "github.com/canonical/microceph/microceph/api/types" 8 | "github.com/canonical/microceph/microceph/client" 9 | "github.com/canonical/microcluster/v2/microcluster" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | type cmdClusterExport struct { 14 | common *CmdControl 15 | cluster *cmdCluster 16 | json bool 17 | } 18 | 19 | func (c *cmdClusterExport) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "export ", 22 | Short: "Generates cluster token for Remote cluster with given name", 23 | RunE: c.Run, 24 | } 25 | 26 | cmd.Flags().BoolVar(&c.json, "json", false, "output as json string") 27 | return cmd 28 | } 29 | 30 | func (c *cmdClusterExport) Run(cmd *cobra.Command, args []string) error { 31 | if len(args) != 1 { 32 | return cmd.Help() 33 | } 34 | 35 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 36 | if err != nil { 37 | return err 38 | } 39 | 40 | cli, err := m.LocalClient() 41 | if err != nil { 42 | return err 43 | } 44 | 45 | state, err := client.GetClusterToken(cmd.Context(), cli, types.ClusterExportRequest{ 46 | RemoteName: args[0], 47 | }) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | // produce output in CLI. 
53 | if c.json { 54 | jsonOut, err := base64.StdEncoding.DecodeString(state) 55 | if err != nil { 56 | return err 57 | } 58 | fmt.Printf("%s\n", jsonOut) 59 | } else { 60 | fmt.Println(state) 61 | } 62 | 63 | return nil 64 | } 65 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_join.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "time" 8 | 9 | "github.com/canonical/lxd/lxd/util" 10 | "github.com/canonical/microcluster/v2/microcluster" 11 | "github.com/spf13/cobra" 12 | 13 | "github.com/canonical/microceph/microceph/constants" 14 | ) 15 | 16 | type cmdClusterJoin struct { 17 | common *CmdControl 18 | cluster *cmdCluster 19 | 20 | flagMicroCephIp string 21 | } 22 | 23 | func (c *cmdClusterJoin) Command() *cobra.Command { 24 | cmd := &cobra.Command{ 25 | Use: "join ", 26 | Short: "Joins an existing cluster", 27 | RunE: c.Run, 28 | } 29 | 30 | cmd.Flags().StringVar(&c.flagMicroCephIp, "microceph-ip", "", "Network address microceph daemon binds to.") 31 | return cmd 32 | } 33 | 34 | func (c *cmdClusterJoin) Run(cmd *cobra.Command, args []string) error { 35 | if len(args) != 1 { 36 | return cmd.Help() 37 | } 38 | 39 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 40 | if err != nil { 41 | return fmt.Errorf("unable to configure MicroCluster: %w", err) 42 | } 43 | 44 | // Get system hostname. 45 | hostname, err := os.Hostname() 46 | if err != nil { 47 | return fmt.Errorf("failed to retrieve system hostname: %w", err) 48 | } 49 | 50 | address := c.flagMicroCephIp 51 | if address == "" { 52 | // Get system address for microcluster join. 
53 | address = util.NetworkInterfaceAddress() 54 | } 55 | address = util.CanonicalNetworkAddress(address, constants.BootstrapPortConst) 56 | 57 | token := args[0] 58 | ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) 59 | defer cancel() 60 | 61 | return m.JoinCluster(ctx, hostname, address, token, nil) 62 | } 63 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_list.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | 7 | "github.com/canonical/lxd/shared" 8 | lxdCmd "github.com/canonical/lxd/shared/cmd" 9 | "github.com/canonical/microcluster/v2/microcluster" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | type cmdClusterList struct { 14 | common *CmdControl 15 | cluster *cmdCluster 16 | } 17 | 18 | func (c *cmdClusterList) Command() *cobra.Command { 19 | cmd := &cobra.Command{ 20 | Use: "list", 21 | Short: "List servers in the cluster", 22 | RunE: c.Run, 23 | } 24 | 25 | return cmd 26 | } 27 | 28 | func (c *cmdClusterList) Run(cmd *cobra.Command, args []string) error { 29 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 30 | if err != nil { 31 | return err 32 | } 33 | 34 | client, err := m.LocalClient() 35 | if err != nil { 36 | return err 37 | } 38 | 39 | clusterMembers, err := client.GetClusterMembers(context.Background()) 40 | if err != nil { 41 | return err 42 | } 43 | 44 | data := make([][]string, len(clusterMembers)) 45 | for i, clusterMember := range clusterMembers { 46 | fingerprint, err := shared.CertFingerprintStr(clusterMember.Certificate.String()) 47 | if err != nil { 48 | continue 49 | } 50 | 51 | data[i] = []string{clusterMember.Name, clusterMember.Address.String(), clusterMember.Role, fingerprint, string(clusterMember.Status)} 52 | } 53 | 54 | header := []string{"NAME", "ADDRESS", "ROLE", "FINGERPRINT", "STATUS"} 55 | 
sort.Sort(lxdCmd.SortColumnsNaturally(data)) 56 | 57 | return lxdCmd.RenderTable(lxdCmd.TableFormatTable, header, data, clusterMembers) 58 | } 59 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_maintenance.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdClusterMaintenance struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdClusterMaintenance) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "maintenance", 14 | Short: "Enter or exit the maintenance mode.", 15 | } 16 | 17 | // Exit 18 | clusterMaintenanceExit := cmdClusterMaintenanceExit{common: c.common} 19 | cmd.AddCommand(clusterMaintenanceExit.Command()) 20 | 21 | // Enter 22 | clusterMaintenanceEnter := cmdClusterMaintenanceEnter{common: c.common} 23 | cmd.AddCommand(clusterMaintenanceEnter.Command()) 24 | 25 | // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 26 | cmd.Args = cobra.NoArgs 27 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 28 | 29 | return cmd 30 | } 31 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_maintenance_enter.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/canonical/microcluster/v2/microcluster" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/canonical/microceph/microceph/client" 10 | ) 11 | 12 | type cmdClusterMaintenanceEnter struct { 13 | common *CmdControl 14 | 15 | flagForce bool 16 | flagDryRun bool 17 | flagSetNoout bool 18 | flagStopOsds bool 19 | flagCheckOnly bool 20 | flagIgnoreCheck bool 21 | } 22 | 23 | func (c *cmdClusterMaintenanceEnter) Command() *cobra.Command { 24 | cmd := &cobra.Command{ 25 | Use: "enter ", 26 | Short: "Enter maintenance mode.", 27 | RunE: c.Run, 28 | } 29 | 30 | cmd.Flags().BoolVar(&c.flagForce, "force", false, "Force to enter maintenance mode.") 31 | cmd.Flags().BoolVar(&c.flagDryRun, "dry-run", false, "Dry run the command.") 32 | cmd.Flags().BoolVar(&c.flagSetNoout, "set-noout", true, "Stop CRUSH from rebalancing the cluster.") 33 | cmd.Flags().BoolVar(&c.flagStopOsds, "stop-osds", false, "Stop the OSDS when entering maintenance mode.") 34 | cmd.Flags().BoolVar(&c.flagCheckOnly, "check-only", false, "Only run the preflight checks (mutually exclusive with --ignore-check).") 35 | cmd.Flags().BoolVar(&c.flagIgnoreCheck, "ignore-check", false, "Ignore the the preflight checks (mutually exclusive with --check-only).") 36 | cmd.MarkFlagsMutuallyExclusive("check-only", "ignore-check") 37 | return cmd 38 | } 39 | 40 | func (c *cmdClusterMaintenanceEnter) Run(cmd *cobra.Command, args []string) error { 41 | if len(args) != 1 { 42 | return cmd.Help() 43 | } 44 | 45 | m, err := microcluster.App(microcluster.Args{StateDir: 
c.common.FlagStateDir}) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | cli, err := m.LocalClient() 51 | if err != nil { 52 | return err 53 | } 54 | 55 | results, err := client.EnterMaintenance(context.Background(), cli, args[0], c.flagForce, c.flagDryRun, c.flagSetNoout, c.flagStopOsds, c.flagCheckOnly, c.flagIgnoreCheck) 56 | if err != nil && !c.flagForce { 57 | return fmt.Errorf("failed to enter maintenance mode: %v", err) 58 | } 59 | 60 | for _, result := range results { 61 | if c.flagDryRun { 62 | fmt.Println(result.Action) 63 | } else { 64 | errMessage := result.Error 65 | if errMessage == "" { 66 | fmt.Printf("%s (succeeded)\n", result.Action) 67 | } else { 68 | fmt.Printf("%s (failed: %s)\n", result.Action, errMessage) 69 | } 70 | } 71 | } 72 | 73 | return nil 74 | } 75 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_maintenance_exit.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/canonical/microcluster/v2/microcluster" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/canonical/microceph/microceph/client" 10 | ) 11 | 12 | type cmdClusterMaintenanceExit struct { 13 | common *CmdControl 14 | 15 | flagDryRun bool 16 | flagCheckOnly bool 17 | flagIgnoreCheck bool 18 | } 19 | 20 | func (c *cmdClusterMaintenanceExit) Command() *cobra.Command { 21 | cmd := &cobra.Command{ 22 | Use: "exit ", 23 | Short: "Exit maintenance mode.", 24 | RunE: c.Run, 25 | } 26 | 27 | cmd.Flags().BoolVar(&c.flagDryRun, "dry-run", false, "Dry run the command.") 28 | cmd.Flags().BoolVar(&c.flagCheckOnly, "check-only", false, "Only run the preflight checks (mutually exclusive with --ignore-check).") 29 | cmd.Flags().BoolVar(&c.flagIgnoreCheck, "ignore-check", false, "Ignore the the preflight checks (mutually exclusive with --check-only).") 30 | cmd.MarkFlagsMutuallyExclusive("check-only", "ignore-check") 31 
| 32 | return cmd 33 | } 34 | 35 | func (c *cmdClusterMaintenanceExit) Run(cmd *cobra.Command, args []string) error { 36 | if len(args) != 1 { 37 | return cmd.Help() 38 | } 39 | 40 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | cli, err := m.LocalClient() 46 | if err != nil { 47 | return err 48 | } 49 | 50 | results, err := client.ExitMaintenance(context.Background(), cli, args[0], c.flagDryRun, c.flagCheckOnly, c.flagIgnoreCheck) 51 | if err != nil { 52 | return fmt.Errorf("failed to exit maintenance mode: %v", err) 53 | } 54 | 55 | for _, result := range results { 56 | if c.flagDryRun { 57 | fmt.Println(result.Action) 58 | } else { 59 | errMessage := result.Error 60 | if errMessage == "" { 61 | fmt.Printf("%s (succeeded)\n", result.Action) 62 | } else { 63 | fmt.Printf("%s (failed: %s)\n", result.Action, errMessage) 64 | } 65 | } 66 | } 67 | 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_migrate.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/lxd/shared/logger" 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | 10 | "github.com/canonical/microceph/microceph/api/types" 11 | "github.com/canonical/microceph/microceph/client" 12 | ) 13 | 14 | type cmdClusterMigrate struct { 15 | common *CmdControl 16 | cluster *cmdCluster 17 | } 18 | 19 | func (c *cmdClusterMigrate) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "migrate ", 20 | Short: "Removes a server from the cluster", 21 | RunE: c.Run, 22 | } 23 | 24 | cmd.Flags().BoolVarP(&c.flagForce, "force", "f", false, "Forcibly remove the cluster member") 25 | 26 | return cmd 27 | } 28 | 29 | func (c *cmdClusterRemove) Run(cmd *cobra.Command, args []string) error { 30 | if 
len(args) != 1 { 31 | return cmd.Help() 32 | } 33 | 34 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | cli, err := m.LocalClient() 40 | if err != nil { 41 | return err 42 | } 43 | 44 | return cli.DeleteClusterMember(context.Background(), args[0], c.flagForce) 45 | } 46 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/cluster_sql.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/olekukonko/tablewriter" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | type cmdClusterSQL struct { 14 | common *CmdControl 15 | cluster *cmdCluster 16 | } 17 | 18 | func (c *cmdClusterSQL) Command() *cobra.Command { 19 | cmd := &cobra.Command{ 20 | Use: "sql ", 21 | Short: "Runs a SQL query against the cluster database", 22 | RunE: c.Run, 23 | } 24 | 25 | return cmd 26 | } 27 | 28 | func (c *cmdClusterSQL) Run(cmd *cobra.Command, args []string) error { 29 | if len(args) != 1 { 30 | err := cmd.Help() 31 | if err != nil { 32 | return fmt.Errorf("Unable to load help: %w", err) 33 | } 34 | 35 | if len(args) == 0 { 36 | return nil 37 | } 38 | } 39 | 40 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | query := args[0] 46 | dump, batch, err := m.SQL(context.Background(), query) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | if dump != "" { 52 | fmt.Print(dump) 53 | return nil 54 | } 55 | 56 | for i, result := range batch.Results { 57 | if len(batch.Results) > 1 { 58 | fmt.Printf("=> Query %d:\n\n", i) 59 | } 60 | 61 | if result.Type == "select" { 62 | sqlPrintSelectResult(result.Columns, result.Rows) 63 | } else { 64 | fmt.Printf("Rows affected: %d\n", result.RowsAffected) 65 | } 66 | 67 | 
if len(batch.Results) > 1 { 68 | fmt.Printf("\n") 69 | } 70 | } 71 | return nil 72 | } 73 | 74 | func sqlPrintSelectResult(columns []string, rows [][]any) { 75 | table := tablewriter.NewWriter(os.Stdout) 76 | table.SetAlignment(tablewriter.ALIGN_LEFT) 77 | table.SetAutoWrapText(false) 78 | table.SetAutoFormatHeaders(false) 79 | table.SetHeader(columns) 80 | for _, row := range rows { 81 | data := []string{} 82 | for _, col := range row { 83 | data = append(data, fmt.Sprintf("%v", col)) 84 | } 85 | 86 | table.Append(data) 87 | } 88 | 89 | table.Render() 90 | } 91 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/disable.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdDisable struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdDisable) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "disable", 14 | Short: "Disables a feature on the cluster", 15 | } 16 | 17 | // Disable RGW 18 | disableRGWCmd := cmdDisableRGW{common: c.common} 19 | cmd.AddCommand(disableRGWCmd.Command()) 20 | 21 | // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 22 | cmd.Args = cobra.NoArgs 23 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 24 | 25 | return cmd 26 | } 27 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/disable_rgw.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microcluster/v2/microcluster" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/canonical/microceph/microceph/client" 10 | ) 11 | 12 | type cmdDisableRGW struct { 13 | common *CmdControl 14 | flagTarget string 15 | } 16 | 17 | func (c *cmdDisableRGW) Command() *cobra.Command { 18 | cmd := &cobra.Command{ 19 | Use: "rgw", 20 | Short: "Disable the RGW service on this node", 21 | RunE: c.Run, 22 | } 23 | cmd.PersistentFlags().StringVar(&c.flagTarget, "target", "", "Server hostname (default: this server)") 24 | return cmd 25 | } 26 | 27 | // Run handles the disable rgw command. 
28 | func (c *cmdDisableRGW) Run(cmd *cobra.Command, args []string) error { 29 | 30 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | cli, err := m.LocalClient() 36 | if err != nil { 37 | return err 38 | } 39 | 40 | err = client.DeleteService(context.Background(), cli, c.flagTarget, "rgw") 41 | if err != nil { 42 | return err 43 | } 44 | 45 | return nil 46 | } 47 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/disk.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdDisk struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdDisk) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "disk", 14 | Short: "Manage the MicroCeph disks", 15 | } 16 | 17 | // Add 18 | diskAddCmd := cmdDiskAdd{common: c.common, disk: c} 19 | cmd.AddCommand(diskAddCmd.Command()) 20 | 21 | // List 22 | diskListCmd := cmdDiskList{common: c.common, disk: c} 23 | cmd.AddCommand(diskListCmd.Command()) 24 | 25 | // Remove 26 | diskRemoveCmd := cmdDiskRemove{common: c.common, disk: c} 27 | cmd.AddCommand(diskRemoveCmd.Command()) 28 | 29 | // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 30 | cmd.Args = cobra.NoArgs 31 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 32 | 33 | return cmd 34 | } 35 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/enable.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdEnable struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdEnable) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "enable", 14 | Short: "Enables a feature or service on the cluster", 15 | } 16 | 17 | // Enable RGW 18 | enableRGWCmd := cmdEnableRGW{common: c.common} 19 | enableMonCmd := cmdEnableMON{common: c.common} 20 | enableMgrCmd := cmdEnableMGR{common: c.common} 21 | enableMdsCmd := cmdEnableMDS{common: c.common} 22 | enableRbdMirrorCmd := cmdEnableRBDMirror{common: c.common} 23 | 24 | cmd.AddCommand(enableRGWCmd.Command()) 25 | cmd.AddCommand(enableMonCmd.Command()) 26 | cmd.AddCommand(enableMgrCmd.Command()) 27 | cmd.AddCommand(enableMdsCmd.Command()) 28 | cmd.AddCommand(enableRbdMirrorCmd.Command()) 29 | 30 | // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 31 | cmd.Args = cobra.NoArgs 32 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 33 | 34 | return cmd 35 | } 36 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/enable_mds.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microcluster/v2/microcluster" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/canonical/microceph/microceph/api/types" 10 | "github.com/canonical/microceph/microceph/client" 11 | ) 12 | 13 | type cmdEnableMDS struct { 14 | common *CmdControl 15 | wait bool 16 | flagTarget string 17 | } 18 | 19 | func (c *cmdEnableMDS) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "mds [--target ] [--wait ]", 22 | Short: "Enable the MDS service on the --target server (default: this server)", 23 | RunE: c.Run, 24 | } 25 | cmd.PersistentFlags().StringVar(&c.flagTarget, "target", "", "Server hostname (default: this server)") 26 | cmd.Flags().BoolVar(&c.wait, "wait", true, "Wait for mds service to be up.") 27 | return cmd 28 | } 29 | 30 | // Run handles the enable mds command. 
31 | func (c *cmdEnableMDS) Run(cmd *cobra.Command, args []string) error { 32 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | cli, err := m.LocalClient() 38 | if err != nil { 39 | return err 40 | } 41 | 42 | req := &types.EnableService{ 43 | Name: "mds", 44 | Wait: c.wait, 45 | Payload: "", 46 | } 47 | 48 | err = client.SendServicePlacementReq(context.Background(), cli, req, c.flagTarget) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/enable_mgr.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microcluster/v2/microcluster" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/canonical/microceph/microceph/api/types" 10 | "github.com/canonical/microceph/microceph/client" 11 | ) 12 | 13 | type cmdEnableMGR struct { 14 | common *CmdControl 15 | wait bool 16 | flagTarget string 17 | } 18 | 19 | func (c *cmdEnableMGR) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "mgr [--target ] [--wait ]", 22 | Short: "Enable the MGR service on the --target server (default: this server)", 23 | RunE: c.Run, 24 | } 25 | cmd.PersistentFlags().StringVar(&c.flagTarget, "target", "", "Server hostname (default: this server)") 26 | cmd.Flags().BoolVar(&c.wait, "wait", true, "Wait for mgr service to be up.") 27 | return cmd 28 | } 29 | 30 | // Run handles the enable mgr command. 
31 | func (c *cmdEnableMGR) Run(cmd *cobra.Command, args []string) error { 32 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | cli, err := m.LocalClient() 38 | if err != nil { 39 | return err 40 | } 41 | 42 | req := &types.EnableService{ 43 | Name: "mgr", 44 | Wait: c.wait, 45 | Payload: "", 46 | } 47 | 48 | err = client.SendServicePlacementReq(context.Background(), cli, req, c.flagTarget) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/enable_mon.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microcluster/v2/microcluster" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/canonical/microceph/microceph/api/types" 10 | "github.com/canonical/microceph/microceph/client" 11 | ) 12 | 13 | type cmdEnableMON struct { 14 | common *CmdControl 15 | wait bool 16 | flagTarget string 17 | } 18 | 19 | func (c *cmdEnableMON) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "mon [--target ] [--wait ]", 22 | Short: "Enable the MON service on the --target server (default: this server)", 23 | RunE: c.Run, 24 | } 25 | cmd.PersistentFlags().StringVar(&c.flagTarget, "target", "", "Server hostname (default: this server)") 26 | cmd.Flags().BoolVar(&c.wait, "wait", true, "Wait for mon service to be up.") 27 | return cmd 28 | } 29 | 30 | // Run handles the enable mon command. 
31 | func (c *cmdEnableMON) Run(cmd *cobra.Command, args []string) error { 32 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | cli, err := m.LocalClient() 38 | if err != nil { 39 | return err 40 | } 41 | cli = cli.UseTarget(c.flagTarget) 42 | req := &types.EnableService{ 43 | Name: "mon", 44 | Wait: c.wait, 45 | Payload: "", 46 | } 47 | 48 | err = client.SendServicePlacementReq(context.Background(), cli, req, c.flagTarget) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/enable_rbd_mirror.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microcluster/v2/microcluster" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/canonical/microceph/microceph/api/types" 10 | "github.com/canonical/microceph/microceph/client" 11 | ) 12 | 13 | type cmdEnableRBDMirror struct { 14 | common *CmdControl 15 | wait bool 16 | flagTarget string 17 | } 18 | 19 | func (c *cmdEnableRBDMirror) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "rbd-mirror [--target ] [--wait ]", 22 | Short: "Enable the RBD Mirror service on the --target server (default: this server)", 23 | RunE: c.Run, 24 | } 25 | cmd.PersistentFlags().StringVar(&c.flagTarget, "target", "", "Server hostname (default: this server)") 26 | cmd.Flags().BoolVar(&c.wait, "wait", true, "Wait for rbd-mirror service to be up.") 27 | return cmd 28 | } 29 | 30 | // Run handles the enable mon command. 
31 | func (c *cmdEnableRBDMirror) Run(cmd *cobra.Command, args []string) error { 32 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | cli, err := m.LocalClient() 38 | if err != nil { 39 | return err 40 | } 41 | cli = cli.UseTarget(c.flagTarget) 42 | req := &types.EnableService{ 43 | Name: "rbd-mirror", 44 | Wait: c.wait, 45 | Payload: "", 46 | } 47 | 48 | err = client.SendServicePlacementReq(context.Background(), cli, req, c.flagTarget) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/remote.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdRemote struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdRemote) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "remote", 14 | Short: "Manage MicroCeph remotes", 15 | } 16 | 17 | // Import subcommand 18 | remoteImportCmd := cmdRemoteImport{common: c.common} 19 | cmd.AddCommand(remoteImportCmd.Command()) 20 | // List subcommand 21 | remoteListCmd := cmdRemoteList{common: c.common} 22 | cmd.AddCommand(remoteListCmd.Command()) 23 | // Remove subcommand 24 | remoteRemoveCmd := cmdRemoteRemove{common: c.common} 25 | cmd.AddCommand(remoteRemoveCmd.Command()) 26 | 27 | // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 28 | cmd.Args = cobra.NoArgs 29 | cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } 30 | 31 | return cmd 32 | } 33 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/remote_import.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "encoding/json" 7 | "fmt" 8 | 9 | "github.com/canonical/microceph/microceph/api/types" 10 | "github.com/canonical/microceph/microceph/client" 11 | "github.com/canonical/microcluster/v2/microcluster" 12 | "github.com/spf13/cobra" 13 | ) 14 | 15 | type cmdRemoteImport struct { 16 | common *CmdControl 17 | localName string 18 | } 19 | 20 | type dict map[string]interface{} 21 | 22 | func (c *cmdRemoteImport) Command() *cobra.Command { 23 | cmd := &cobra.Command{ 24 | Use: "import ", 25 | Short: "Import external MicroCeph cluster as a remote", 26 | RunE: c.Run, 27 | } 28 | 29 | cmd.PersistentFlags().StringVar(&c.localName, "local-name", "", "friendly local name for cluster") 30 | return cmd 31 | } 32 | 33 | func (c *cmdRemoteImport) Run(cmd *cobra.Command, args []string) error { 34 | if len(args) != 2 { 35 | return cmd.Help() 36 | } 37 | 38 | if len(c.localName) == 0 { 39 | return fmt.Errorf("please provide a local name using `--local-name` flag") 40 | } 41 | 42 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | cli, err := m.LocalClient() 48 | if err != nil { 49 | return err 50 | } 51 | 52 | // Read remote cluster token 53 | data := dict{} 54 | jsonContent, err := base64.StdEncoding.DecodeString(args[1]) 55 | if err != nil { 56 | return err 57 | } 58 | 59 | err = json.Unmarshal(jsonContent, &data) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | // Prepare payload for API request. 
65 | payload := types.RemoteImportRequest{} 66 | payload.Init(c.localName, args[0], false) // initialise with local and remote name. 67 | for key, value := range data { 68 | payload.Config[key] = fmt.Sprintf("%s", value) 69 | } 70 | 71 | // send remote import request 72 | return client.SendRemoteImportRequest(context.Background(), cli, payload) 73 | } 74 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/remote_list.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "os" 8 | 9 | "github.com/canonical/microceph/microceph/api/types" 10 | "github.com/canonical/microceph/microceph/client" 11 | "github.com/canonical/microcluster/v2/microcluster" 12 | "github.com/jedib0t/go-pretty/v6/table" 13 | "github.com/spf13/cobra" 14 | "golang.org/x/crypto/ssh/terminal" 15 | ) 16 | 17 | type cmdRemoteList struct { 18 | common *CmdControl 19 | json bool 20 | } 21 | 22 | func (c *cmdRemoteList) Command() *cobra.Command { 23 | cmd := &cobra.Command{ 24 | Use: "list", 25 | Short: "List all configured remotes for the site", 26 | RunE: c.Run, 27 | } 28 | 29 | cmd.Flags().BoolVar(&c.json, "json", false, "output as json string") 30 | return cmd 31 | } 32 | 33 | func (c *cmdRemoteList) Run(cmd *cobra.Command, args []string) error { 34 | if len(args) != 0 { 35 | return cmd.Help() 36 | } 37 | 38 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | cli, err := m.LocalClient() 44 | if err != nil { 45 | return err 46 | } 47 | 48 | // Read remote cluster token 49 | data, err := client.FetchAllRemotes(context.Background(), cli) 50 | if err != nil { 51 | return fmt.Errorf("failed to fetch remotes: %w", err) 52 | } 53 | 54 | if c.json { 55 | return printRemotesJson(data) 56 | } 57 | 58 | return printRemoteTable(data) 59 | } 60 | 61 | func 
printRemotesJson(remotes []types.RemoteRecord) error { 62 | opStr, err := json.Marshal(remotes) 63 | if err != nil { 64 | return fmt.Errorf("internal error: unable to encode json output: %w", err) 65 | } 66 | 67 | fmt.Printf("%s\n", opStr) 68 | return nil 69 | } 70 | 71 | func printRemoteTable(remotes []types.RemoteRecord) error { 72 | t := table.NewWriter() 73 | t.SetOutputMirror(os.Stdout) 74 | t.AppendHeader(table.Row{"ID", "Remote Name", "Local Name"}) 75 | for _, remote := range remotes { 76 | t.AppendRow(table.Row{remote.ID, remote.Name, remote.LocalName}) 77 | } 78 | if terminal.IsTerminal(0) && terminal.IsTerminal(1) { 79 | // Set style if interactive shell. 80 | t.SetStyle(table.StyleColoredBright) 81 | } 82 | t.Render() 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/remote_remove.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microceph/microceph/client" 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | type cmdRemoteRemove struct { 12 | common *CmdControl 13 | } 14 | 15 | func (c *cmdRemoteRemove) Command() *cobra.Command { 16 | cmd := &cobra.Command{ 17 | Use: "remove ", 18 | Short: "Remove configured remote", 19 | RunE: c.Run, 20 | } 21 | 22 | return cmd 23 | } 24 | 25 | func (c *cmdRemoteRemove) Run(cmd *cobra.Command, args []string) error { 26 | if len(args) != 1 { 27 | return cmd.Help() 28 | } 29 | 30 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | cli, err := m.LocalClient() 36 | if err != nil { 37 | return err 38 | } 39 | 40 | // send remote remove request 41 | return client.SendRemoteRemoveRequest(context.Background(), cli, args[0]) 42 | } 43 | 
-------------------------------------------------------------------------------- /microceph/cmd/microceph/replication.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | type cmdReplication struct { 8 | common *CmdControl 9 | } 10 | 11 | func (c *cmdReplication) Command() *cobra.Command { 12 | cmd := &cobra.Command{ 13 | Use: "replication", 14 | Short: "manage replication to remote clusters", 15 | } 16 | 17 | // Replication enable command 18 | replicationEnableCmd := cmdReplicationEnable{common: c.common} 19 | cmd.AddCommand(replicationEnableCmd.Command()) 20 | 21 | // Replication disable command 22 | replicationDisableCmd := cmdReplicationDisable{common: c.common} 23 | cmd.AddCommand(replicationDisableCmd.Command()) 24 | 25 | // Replication list command 26 | replicationListCmd := cmdReplicationList{common: c.common} 27 | cmd.AddCommand(replicationListCmd.Command()) 28 | 29 | // Replication status command 30 | replicationStatusCmd := cmdReplicationStatus{common: c.common} 31 | cmd.AddCommand(replicationStatusCmd.Command()) 32 | 33 | // Replication configure command 34 | replicationConfigureCmd := cmdReplicationConfigure{common: c.common} 35 | cmd.AddCommand(replicationConfigureCmd.Command()) 36 | 37 | // Replication promote command 38 | replicationPromoteCmd := cmdReplicationPromote{common: c.common} 39 | cmd.AddCommand(replicationPromoteCmd.Command()) 40 | 41 | // Replication demote command 42 | replicationDemoteCmd := cmdReplicationDemote{common: c.common} 43 | cmd.AddCommand(replicationDemoteCmd.Command()) 44 | 45 | return cmd 46 | } 47 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/replication_configure.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microceph/microceph/api/types" 
7 | "github.com/canonical/microceph/microceph/client" 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | type cmdReplicationConfigure struct { 13 | common *CmdControl 14 | } 15 | 16 | func (c *cmdReplicationConfigure) Command() *cobra.Command { 17 | cmd := &cobra.Command{ 18 | Use: "configure", 19 | Short: "configure replication parameters", 20 | } 21 | 22 | configureRbdCmd := cmdReplicationConfigureRbd{common: c.common} 23 | cmd.AddCommand(configureRbdCmd.Command()) 24 | 25 | return cmd 26 | } 27 | 28 | type cmdReplicationConfigureRbd struct { 29 | common *CmdControl 30 | schedule string 31 | } 32 | 33 | func (c *cmdReplicationConfigureRbd) Command() *cobra.Command { 34 | cmd := &cobra.Command{ 35 | Use: "rbd ", 36 | Short: "Configure replication parameters for RBD resource (Pool or Image)", 37 | RunE: c.Run, 38 | } 39 | 40 | cmd.Flags().StringVar(&c.schedule, "schedule", "", "snapshot schedule in days, hours, or minutes using d, h, m suffix respectively") 41 | return cmd 42 | } 43 | 44 | func (c *cmdReplicationConfigureRbd) Run(cmd *cobra.Command, args []string) error { 45 | if len(args) != 1 { 46 | return cmd.Help() 47 | } 48 | 49 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | cli, err := m.LocalClient() 55 | if err != nil { 56 | return err 57 | } 58 | 59 | payload, err := c.prepareRbdPayload(types.ConfigureReplicationRequest, args) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | _, err = client.SendReplicationRequest(context.Background(), cli, payload) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | return nil 70 | } 71 | 72 | func (c *cmdReplicationConfigureRbd) prepareRbdPayload(requestType types.ReplicationRequestType, args []string) (types.RbdReplicationRequest, error) { 73 | pool, image, err := types.GetPoolAndImageFromResource(args[0]) 74 | if err != nil { 75 | return types.RbdReplicationRequest{}, err 76 | 
} 77 | 78 | retReq := types.RbdReplicationRequest{ 79 | SourcePool: pool, 80 | SourceImage: image, 81 | Schedule: c.schedule, 82 | RequestType: requestType, 83 | ResourceType: types.GetRbdResourceType(pool, image), 84 | } 85 | 86 | return retReq, nil 87 | } 88 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/replication_demote.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microceph/microceph/api/types" 7 | "github.com/canonical/microceph/microceph/client" 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | type cmdReplicationDemote struct { 13 | common *CmdControl 14 | remoteName string 15 | isForce bool 16 | } 17 | 18 | func (c *cmdReplicationDemote) Command() *cobra.Command { 19 | cmd := &cobra.Command{ 20 | Use: "demote", 21 | Short: "Demote a primary cluster to non-primary status", 22 | RunE: c.Run, 23 | } 24 | 25 | cmd.Flags().StringVar(&c.remoteName, "remote", "", "remote MicroCeph cluster name") 26 | cmd.Flags().BoolVar(&c.isForce, "yes-i-really-mean-it", false, "demote cluster irrespective of data loss") 27 | cmd.MarkFlagRequired("remote") 28 | return cmd 29 | } 30 | 31 | func (c *cmdReplicationDemote) Run(cmd *cobra.Command, args []string) error { 32 | if len(args) != 0 { 33 | return cmd.Help() 34 | } 35 | 36 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | cli, err := m.LocalClient() 42 | if err != nil { 43 | return err 44 | } 45 | 46 | payload, err := c.preparePayload(types.DemoteReplicationRequest) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | _, err = client.SendReplicationRequest(context.Background(), cli, payload) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | return nil 57 | } 58 | 59 | func (c *cmdReplicationDemote) 
preparePayload(requestType types.ReplicationRequestType) (types.RbdReplicationRequest, error) { 60 | retReq := types.RbdReplicationRequest{ 61 | RemoteName: c.remoteName, 62 | RequestType: requestType, 63 | ResourceType: types.RbdResourcePool, 64 | SourcePool: "", 65 | IsForceOp: c.isForce, 66 | } 67 | 68 | return retReq, nil 69 | } 70 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/replication_disable.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microceph/microceph/api/types" 7 | "github.com/canonical/microceph/microceph/client" 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | type cmdReplicationDisable struct { 13 | common *CmdControl 14 | } 15 | 16 | func (c *cmdReplicationDisable) Command() *cobra.Command { 17 | cmd := &cobra.Command{ 18 | Use: "disable", 19 | Short: "Disable replication", 20 | } 21 | 22 | disableRbdCmd := cmdReplicationDisableRbd{common: c.common} 23 | cmd.AddCommand(disableRbdCmd.Command()) 24 | 25 | return cmd 26 | } 27 | 28 | type cmdReplicationDisableRbd struct { 29 | common *CmdControl 30 | isForce bool 31 | } 32 | 33 | func (c *cmdReplicationDisableRbd) Command() *cobra.Command { 34 | cmd := &cobra.Command{ 35 | Use: "rbd ", 36 | Short: "Disable replication for RBD resource (Pool or Image)", 37 | RunE: c.Run, 38 | } 39 | 40 | cmd.Flags().BoolVar(&c.isForce, "force", false, "forcefully disable replication for rbd resource") 41 | return cmd 42 | } 43 | 44 | func (c *cmdReplicationDisableRbd) Run(cmd *cobra.Command, args []string) error { 45 | if len(args) != 1 { 46 | return cmd.Help() 47 | } 48 | 49 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | cli, err := m.LocalClient() 55 | if err != nil { 56 | return err 57 | } 58 | 59 | 
payload, err := c.prepareRbdPayload(types.DisableReplicationRequest, args) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | _, err = client.SendReplicationRequest(context.Background(), cli, payload) 65 | return err 66 | } 67 | 68 | func (c *cmdReplicationDisableRbd) prepareRbdPayload(requestType types.ReplicationRequestType, args []string) (types.RbdReplicationRequest, error) { 69 | pool, image, err := types.GetPoolAndImageFromResource(args[0]) 70 | if err != nil { 71 | return types.RbdReplicationRequest{}, err 72 | } 73 | 74 | retReq := types.RbdReplicationRequest{ 75 | SourcePool: pool, 76 | SourceImage: image, 77 | RequestType: requestType, 78 | IsForceOp: c.isForce, 79 | ResourceType: types.GetRbdResourceType(pool, image), 80 | } 81 | 82 | return retReq, nil 83 | } 84 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/replication_promote.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/microceph/microceph/api/types" 7 | "github.com/canonical/microceph/microceph/client" 8 | "github.com/canonical/microcluster/v2/microcluster" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | type cmdReplicationPromote struct { 13 | common *CmdControl 14 | remoteName string 15 | isForce bool 16 | } 17 | 18 | func (c *cmdReplicationPromote) Command() *cobra.Command { 19 | cmd := &cobra.Command{ 20 | Use: "promote", 21 | Short: "Promote a non-primary cluster to primary status", 22 | RunE: c.Run, 23 | } 24 | 25 | cmd.Flags().StringVar(&c.remoteName, "remote", "", "remote MicroCeph cluster name") 26 | cmd.Flags().BoolVar(&c.isForce, "yes-i-really-mean-it", false, "forcefully promote site to primary") 27 | cmd.MarkFlagRequired("remote") 28 | return cmd 29 | } 30 | 31 | func (c *cmdReplicationPromote) Run(cmd *cobra.Command, args []string) error { 32 | if len(args) != 0 { 33 | return cmd.Help() 34 | } 35 | 36 | m, err 
:= microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | cli, err := m.LocalClient() 42 | if err != nil { 43 | return err 44 | } 45 | 46 | payload, err := c.preparePayload(types.PromoteReplicationRequest) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | _, err = client.SendReplicationRequest(context.Background(), cli, payload) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | return nil 57 | } 58 | 59 | func (c *cmdReplicationPromote) preparePayload(requestType types.ReplicationRequestType) (types.RbdReplicationRequest, error) { 60 | retReq := types.RbdReplicationRequest{ 61 | RemoteName: c.remoteName, 62 | RequestType: requestType, 63 | IsForceOp: c.isForce, 64 | ResourceType: types.RbdResourcePool, 65 | SourcePool: "", 66 | } 67 | 68 | return retReq, nil 69 | } 70 | -------------------------------------------------------------------------------- /microceph/cmd/microceph/status.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sort" 7 | "strings" 8 | 9 | "github.com/canonical/microcluster/v2/microcluster" 10 | "github.com/spf13/cobra" 11 | 12 | "github.com/canonical/microceph/microceph/client" 13 | ) 14 | 15 | type cmdStatus struct { 16 | common *CmdControl 17 | } 18 | 19 | func (c *cmdStatus) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "status", 22 | Short: "Checks the cluster status", 23 | RunE: c.Run, 24 | } 25 | 26 | return cmd 27 | } 28 | 29 | func (c *cmdStatus) Run(cmd *cobra.Command, args []string) error { 30 | m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | cli, err := m.LocalClient() 36 | if err != nil { 37 | return err 38 | } 39 | 40 | // Get configured disks. 
41 | disks, err := client.GetDisks(context.Background(), cli) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | // Get services. 47 | services, err := client.GetServices(context.Background(), cli) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | // Get cluster members. 53 | clusterMembers, err := cli.GetClusterMembers(context.Background()) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | fmt.Println("MicroCeph deployment summary:") 59 | 60 | for _, server := range clusterMembers { 61 | // Disks. 62 | diskCount := 0 63 | for _, disk := range disks { 64 | if disk.Location != server.Name { 65 | continue 66 | } 67 | 68 | diskCount++ 69 | } 70 | 71 | // Services. 72 | srvServices := []string{} 73 | for _, service := range services { 74 | if service.Location != server.Name { 75 | continue 76 | } 77 | 78 | srvServices = append(srvServices, service.Service) 79 | } 80 | sort.Strings(srvServices) 81 | 82 | if diskCount > 0 { 83 | srvServices = append(srvServices, "osd") 84 | } 85 | 86 | fmt.Printf("- %s (%s)\n", server.Name, server.Address.Addr().String()) 87 | fmt.Printf(" Services: %s\n", strings.Join(srvServices, ", ")) 88 | fmt.Printf(" Disks: %d\n", diskCount) 89 | } 90 | 91 | return nil 92 | } 93 | -------------------------------------------------------------------------------- /microceph/common/bootstrap.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | type BootstrapConfig struct { 4 | MonIp string 5 | PublicNet string 6 | ClusterNet string 7 | } 8 | 9 | func EncodeBootstrapConfig(data BootstrapConfig) map[string]string { 10 | return map[string]string{ 11 | "MonIp": data.MonIp, 12 | "PublicNet": data.PublicNet, 13 | "ClusterNet": data.ClusterNet, 14 | } 15 | } 16 | 17 | func DecodeBootstrapConfig(input map[string]string, data *BootstrapConfig) { 18 | data.MonIp = input["MonIp"] 19 | data.PublicNet = input["PublicNet"] 20 | data.ClusterNet = input["ClusterNet"] 21 | } 22 | 
-------------------------------------------------------------------------------- /microceph/common/cluster.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/canonical/lxd/shared/logger" 7 | "github.com/canonical/microceph/microceph/interfaces" 8 | ) 9 | 10 | func GetClusterMemberNames(ctx context.Context, s interfaces.StateInterface) ([]string, error) { 11 | leader, err := s.ClusterState().Leader() 12 | if err != nil { 13 | return nil, err 14 | } 15 | 16 | members, err := leader.GetClusterMembers(ctx) 17 | if err != nil { 18 | return nil, err 19 | } 20 | 21 | logger.Infof("Cluster Members are: %v", members) 22 | 23 | memberNames := make([]string, len(members)) 24 | for i, member := range members { 25 | memberNames[i] = member.Name 26 | } 27 | 28 | logger.Infof("Cluster Members Names are: %v", memberNames) 29 | 30 | return memberNames, nil 31 | } 32 | -------------------------------------------------------------------------------- /microceph/common/fileutils.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | "time" 7 | 8 | "github.com/canonical/lxd/shared/logger" 9 | "github.com/djherbis/times" 10 | ) 11 | 12 | // FilterFilesInDir filters filenames which matches the substring in path. 13 | func FilterFilesInDir(subString string, path string) []string { 14 | files, err := filepath.Glob(path + fmt.Sprintf("*%s*", subString)) 15 | if err != nil { 16 | logger.Errorf("failure finding files {%s} at path {%s}", subString, path) 17 | return []string{} 18 | } 19 | 20 | return files 21 | } 22 | 23 | // GetFileAge fetches provided file's age in seconds; in case of errors, 0 (zero) age is 24 | // reported and errors are logged. This is because it is expected to be called for files 25 | // which may not be present. 
26 | func GetFileAge(path string) float64 { 27 | t, err := times.Stat(path) 28 | if err != nil { 29 | logger.Error(err.Error()) 30 | return 0 31 | } 32 | 33 | if !t.HasBirthTime() { 34 | logger.Warnf("File %s has no birth time.", path) 35 | return 0 36 | } 37 | 38 | // age is current time - birth time. 39 | return time.Since(t.BirthTime()).Seconds() 40 | } 41 | -------------------------------------------------------------------------------- /microceph/common/set.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | type Set map[string]interface{} 4 | 5 | func (s Set) Keys() []string { 6 | keys := make([]string, len(s)) 7 | count := 0 8 | 9 | for key := range s { 10 | keys[count] = key 11 | count++ 12 | } 13 | 14 | return keys 15 | } 16 | 17 | func (s Set) IsIn(super Set) bool { 18 | flag := true 19 | 20 | // mark flag false if any key from subset is not present in superset. 21 | for key := range s { 22 | _, ok := super[key] 23 | if !ok { 24 | flag = false 25 | break // Break the loop. 
26 | } 27 | } 28 | 29 | return flag 30 | } 31 | -------------------------------------------------------------------------------- /microceph/common/storage_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "github.com/canonical/microceph/microceph/tests" 5 | "github.com/stretchr/testify/suite" 6 | "os" 7 | "path/filepath" 8 | "testing" 9 | ) 10 | 11 | type StorageDeviceTestSuite struct { 12 | tests.BaseSuite 13 | devicePath string 14 | } 15 | 16 | func (s *StorageDeviceTestSuite) SetupTest() { 17 | s.BaseSuite.SetupTest() 18 | s.CopyCephConfigs() 19 | 20 | osdDir := filepath.Join(s.Tmp, "SNAP_COMMON", "data", "osd", "ceph-0") 21 | os.MkdirAll(osdDir, 0775) 22 | // create a temp file to use as a device 23 | s.devicePath = filepath.Join(s.Tmp, "device") 24 | os.Create(s.devicePath) 25 | os.MkdirAll(filepath.Join(s.Tmp, "dev"), 0775) 26 | os.Create(filepath.Join(s.Tmp, "dev", "sdb")) 27 | os.Create(filepath.Join(s.Tmp, "dev", "sdc")) 28 | 29 | // create a /proc/mounts like file 30 | mountsFile := filepath.Join(s.Tmp, "proc", "mounts") 31 | mountsContent := "/dev/root / ext4 rw,relatime,discard,errors=remount-ro 0 0\n" 32 | mountsContent += filepath.Join(s.Tmp, "dev", "sdb") + " /mnt ext2 rw,relatime 0 0\n" 33 | _ = os.WriteFile(mountsFile, []byte(mountsContent), 0644) 34 | 35 | } 36 | 37 | func (s *StorageDeviceTestSuite) TestIsCephDeviceNotADevice() { 38 | isCeph, err := IsCephDevice(s.devicePath) 39 | s.NoError(err, "There should not be an error when checking a device that is not used by Ceph") 40 | s.False(isCeph, "The device should not be identified as a Ceph device") 41 | } 42 | 43 | func (s *StorageDeviceTestSuite) TestIsCephDeviceHaveDevice() { 44 | // create a symlink to the device file 45 | os.Symlink(s.devicePath, filepath.Join(s.Tmp, "SNAP_COMMON", "data", "osd", "ceph-0", "block")) 46 | isCeph, err := IsCephDevice(s.devicePath) 47 | s.NoError(err, "There should not be an error 
when checking a device that is used by Ceph") 48 | s.True(isCeph, "The device should be identified as a Ceph device") 49 | } 50 | 51 | func (s *StorageDeviceTestSuite) TestIsMounted() { 52 | // we added a /proc/mounts like file containing an entry for /dev/sdb 53 | mounted, err := IsMounted("/dev/sdb") 54 | s.NoError(err, "There should not be an error when checking if a device is mounted") 55 | s.True(mounted, "The device should be mounted") 56 | 57 | mounted, err = IsMounted("/dev/sdc") 58 | s.NoError(err, "There should not be an error when checking if a device is not mounted") 59 | s.False(mounted, "The device should not be mounted") 60 | 61 | } 62 | 63 | func TestStorageDeviceSuite(t *testing.T) { 64 | suite.Run(t, new(StorageDeviceTestSuite)) 65 | } 66 | -------------------------------------------------------------------------------- /microceph/database/config.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | //go:generate -command mapper lxd-generate db mapper -t config.mapper.go 4 | //go:generate mapper reset 5 | // 6 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e ConfigItem objects table=config 7 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e ConfigItem objects-by-Key table=config 8 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e ConfigItem id table=config 9 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e ConfigItem create table=config 10 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e ConfigItem delete-by-Key table=config 11 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e ConfigItem update table=config 12 | 13 | // 14 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e ConfigItem GetMany table=config 15 | //go:generate mapper method -i -d 
github.com/canonical/microcluster/v2/cluster -e ConfigItem GetOne table=config 16 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e ConfigItem ID table=config 17 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e ConfigItem Exists table=config 18 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e ConfigItem Create table=config 19 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e ConfigItem DeleteOne-by-Key table=config 20 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e ConfigItem Update table=config 21 | 22 | // ConfigItem is used to track the Ceph configuration. 23 | type ConfigItem struct { 24 | ID int 25 | Key string `db:"primary=yes"` 26 | Value string 27 | } 28 | 29 | // ConfigItemFilter is a required struct for use with lxd-generate. It is used for filtering fields on database fetches. 30 | type ConfigItemFilter struct { 31 | Key *string 32 | } 33 | -------------------------------------------------------------------------------- /microceph/database/disk.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | //go:generate -command mapper lxd-generate db mapper -t disk.mapper.go 4 | //go:generate mapper reset 5 | // 6 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e Disk objects table=Disks 7 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e Disk objects-by-Member table=Disks 8 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e Disk objects-by-Member-and-Path table=Disks 9 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e Disk id table=Disks 10 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e Disk create table=Disks 11 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster 
-e Disk delete-by-Member table=Disks 12 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e Disk delete-by-Member-and-Path table=Disks 13 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e Disk update table=Disks 14 | // 15 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk GetMany 16 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk GetOne 17 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk ID 18 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk Exists 19 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk Create 20 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk DeleteOne-by-Member-and-Path 21 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk DeleteMany-by-Member 22 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e Disk Update 23 | 24 | // Disk is used to track the Ceph disks on a particular server. 25 | type Disk struct { 26 | ID int 27 | Member string `db:"primary=yes&join=core_cluster_members.name&joinon=Disks.member_id"` 28 | Path string `db:"primary=yes"` 29 | } 30 | 31 | // DiskFilter is a required struct for use with lxd-generate. It is used for filtering fields on database fetches. 
32 | type DiskFilter struct { 33 | Member *string 34 | Path *string 35 | } 36 | -------------------------------------------------------------------------------- /microceph/database/remote.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | //go:generate -command mapper lxd-generate db mapper -t remote.mapper.go 4 | //go:generate mapper reset 5 | // 6 | //go:generate mapper stmt -d github.com/canonical/microcluster/cluster -e Remote objects table=remote 7 | //go:generate mapper stmt -d github.com/canonical/microcluster/cluster -e Remote objects-by-Name table=remote 8 | //go:generate mapper stmt -d github.com/canonical/microcluster/cluster -e Remote id table=remote 9 | //go:generate mapper stmt -d github.com/canonical/microcluster/cluster -e Remote create table=remote 10 | //go:generate mapper stmt -d github.com/canonical/microcluster/cluster -e Remote delete-by-Name table=remote 11 | //go:generate mapper stmt -d github.com/canonical/microcluster/cluster -e Remote update table=remote 12 | 13 | // 14 | //go:generate mapper method -i -d github.com/canonical/microcluster/cluster -e Remote GetMany table=remote 15 | //go:generate mapper method -i -d github.com/canonical/microcluster/cluster -e Remote GetOne table=remote 16 | //go:generate mapper method -i -d github.com/canonical/microcluster/cluster -e Remote ID table=remote 17 | //go:generate mapper method -i -d github.com/canonical/microcluster/cluster -e Remote Exists table=remote 18 | //go:generate mapper method -i -d github.com/canonical/microcluster/cluster -e Remote Create table=remote 19 | //go:generate mapper method -i -d github.com/canonical/microcluster/cluster -e Remote DeleteOne-by-Name table=remote 20 | //go:generate mapper method -i -d github.com/canonical/microcluster/cluster -e Remote Update table=remote 21 | 22 | // Remote is used to track the Remotes. 
23 | type Remote struct {
24 | 	ID int
25 | 	Name string `db:"primary=yes"`
26 | 	LocalName string // friendly local cluster name
27 | }
28 | 
29 | // RemoteFilter is a required struct for use with lxd-generate. It is used for filtering fields on database fetches.
30 | type RemoteFilter struct {
31 | 	Name *string
32 | }
33 | 
--------------------------------------------------------------------------------
/microceph/database/service.go:
--------------------------------------------------------------------------------
1 | package database
2 | 
3 | //go:generate -command mapper lxd-generate db mapper -t service.mapper.go
4 | //go:generate mapper reset
5 | //
6 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service objects table=services
7 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service objects-by-Member table=services
8 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service objects-by-Service table=services
9 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service objects-by-Member-and-Service table=services
10 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service id table=services
11 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service create table=services
12 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service delete-by-Member table=services
13 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service delete-by-Member-and-Service table=services
14 | //go:generate mapper stmt -d github.com/canonical/microcluster/v2/cluster -e service update table=services
15 | //
16 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e service GetMany
17 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e service GetOne
18 | //go:generate mapper method -i -d
github.com/canonical/microcluster/v2/cluster -e service ID 19 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e service Exists 20 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e service Create 21 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e service DeleteOne-by-Member-and-Service 22 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e service DeleteMany-by-Member 23 | //go:generate mapper method -i -d github.com/canonical/microcluster/v2/cluster -e service Update 24 | 25 | // Service is used to track the Ceph services running on a particular server. 26 | type Service struct { 27 | ID int 28 | Member string `db:"primary=yes&join=core_cluster_members.name&joinon=services.member_id"` 29 | Service string `db:"primary=yes"` 30 | } 31 | 32 | // ServiceFilter is a required struct for use with lxd-generate. It is used for filtering fields on database fetches. 
33 | type ServiceFilter struct {
34 | 	Member *string
35 | 	Service *string
36 | }
37 | 
--------------------------------------------------------------------------------
/microceph/interfaces/state.go:
--------------------------------------------------------------------------------
1 | // Package interfaces provides interfaces to the microcluster cluster state
2 | package interfaces
3 | 
4 | import (
5 | 	"github.com/canonical/microcluster/v2/state"
6 | )
7 | 
8 | // StateInterface for retrieving cluster state
9 | type StateInterface interface {
10 | 	ClusterState() state.State
11 | }
12 | 
13 | // CephState holds cluster state
14 | type CephState struct {
15 | 	State state.State
16 | }
17 | 
18 | // ClusterState gets the cluster state
19 | func (c CephState) ClusterState() state.State {
20 | 	return c.State
21 | }
22 | 
--------------------------------------------------------------------------------
/microceph/mocks/ConfigWriter.go:
--------------------------------------------------------------------------------
1 | // Code generated by mockery v2.30.10. DO NOT EDIT.
2 | 
3 | package mocks
4 | 
5 | import mock "github.com/stretchr/testify/mock"
6 | 
7 | // ConfigWriter is an autogenerated mock type for the ConfigWriter type
8 | type ConfigWriter struct {
9 | 	mock.Mock
10 | }
11 | 
12 | // WriteConfig provides a mock function with given fields: _a0
13 | func (_m *ConfigWriter) WriteConfig(_a0 interface{}) error {
14 | 	ret := _m.Called(_a0)
15 | 
16 | 	var r0 error
17 | 	if rf, ok := ret.Get(0).(func(interface{}) error); ok {
18 | 		r0 = rf(_a0)
19 | 	} else {
20 | 		r0 = ret.Error(0)
21 | 	}
22 | 
23 | 	return r0
24 | }
25 | 
26 | // NewConfigWriter creates a new instance of ConfigWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
27 | // The first argument is typically a *testing.T value.
28 | func NewConfigWriter(t interface { 29 | mock.TestingT 30 | Cleanup(func()) 31 | }) *ConfigWriter { 32 | mock := &ConfigWriter{} 33 | mock.Mock.Test(t) 34 | 35 | t.Cleanup(func() { mock.AssertExpectations(t) }) 36 | 37 | return mock 38 | } 39 | -------------------------------------------------------------------------------- /microceph/mocks/MemberCounterInterface.go: -------------------------------------------------------------------------------- 1 | // Generated by mockery with a minor update as mockery confuses import paths 2 | package mocks 3 | 4 | import ( 5 | context "context" 6 | 7 | state "github.com/canonical/microcluster/v2/state" // mockery gets confused about import paths here 8 | mock "github.com/stretchr/testify/mock" 9 | ) 10 | 11 | // MemberCounterInterface is an autogenerated mock type for the MemberCounterInterface type 12 | type MemberCounterInterface struct { 13 | mock.Mock 14 | } 15 | 16 | // Count provides a mock function with given fields: s 17 | func (_m *MemberCounterInterface) Count(ctx context.Context, s state.State) (int, error) { 18 | ret := _m.Called(s) 19 | 20 | var r0 int 21 | var r1 error 22 | if rf, ok := ret.Get(0).(func(context.Context, state.State) (int, error)); ok { 23 | return rf(ctx, s) 24 | } 25 | if rf, ok := ret.Get(0).(func(context.Context, state.State) int); ok { 26 | r0 = rf(ctx, s) 27 | } else { 28 | r0 = ret.Get(0).(int) 29 | } 30 | 31 | if rf, ok := ret.Get(1).(func(context.Context, state.State) error); ok { 32 | r1 = rf(ctx, s) 33 | } else { 34 | r1 = ret.Error(1) 35 | } 36 | 37 | return r0, r1 38 | } 39 | 40 | // CountExclude provides a mock function with given fields: s, exclude 41 | func (_m *MemberCounterInterface) CountExclude(ctx context.Context, s state.State, exclude int64) (int, error) { 42 | ret := _m.Called(s, exclude) 43 | 44 | var r0 int 45 | var r1 error 46 | if rf, ok := ret.Get(0).(func(context.Context, state.State, int64) (int, error)); ok { 47 | return rf(ctx, s, exclude) 48 | } 49 | if rf, ok 
:= ret.Get(0).(func(context.Context, state.State, int64) int); ok { 50 | r0 = rf(ctx, s, exclude) 51 | } else { 52 | r0 = ret.Get(0).(int) 53 | } 54 | 55 | if rf, ok := ret.Get(1).(func(context.Context, state.State, int64) error); ok { 56 | r1 = rf(ctx, s, exclude) 57 | } else { 58 | r1 = ret.Error(1) 59 | } 60 | 61 | return r0, r1 62 | } 63 | 64 | // NewMemberCounterInterface creates a new instance of MemberCounterInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 65 | // The first argument is typically a *testing.T value. 66 | func NewMemberCounterInterface(t interface { 67 | mock.TestingT 68 | Cleanup(func()) 69 | }) *MemberCounterInterface { 70 | mock := &MemberCounterInterface{} 71 | mock.Mock.Test(t) 72 | 73 | t.Cleanup(func() { mock.AssertExpectations(t) }) 74 | 75 | return mock 76 | } 77 | -------------------------------------------------------------------------------- /microceph/mocks/MicroclusterState.go: -------------------------------------------------------------------------------- 1 | package mocks 2 | 3 | import ( 4 | "github.com/canonical/lxd/shared" 5 | "github.com/canonical/lxd/shared/api" 6 | state "github.com/canonical/microcluster/v2/state" 7 | ) 8 | 9 | // MockState mocks the internal microcluster state. 10 | type MockState struct { 11 | state.State 12 | 13 | URL *api.URL 14 | ClusterName string 15 | } 16 | 17 | // Name returns the name supplied to MockState. 18 | func (m *MockState) Name() string { 19 | return m.ClusterName 20 | } 21 | 22 | // Address returns the address supplied to MockState. 23 | func (m *MockState) Address() *api.URL { 24 | return m.URL 25 | } 26 | 27 | // ServerCert is set to always return nil to prematurely return before making database actions. 
28 | func (m *MockState) ServerCert() *shared.CertInfo { 29 | return nil 30 | } 31 | -------------------------------------------------------------------------------- /microceph/mocks/NetworkIntf.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery v2.30.10. DO NOT EDIT. 2 | 3 | package mocks 4 | 5 | import mock "github.com/stretchr/testify/mock" 6 | 7 | // NetworkIntf is an autogenerated mock type for the NetworkIntf type 8 | type NetworkIntf struct { 9 | mock.Mock 10 | } 11 | 12 | // FindIpOnSubnet provides a mock function with given fields: subnet 13 | func (_m *NetworkIntf) FindIpOnSubnet(subnet string) (string, error) { 14 | ret := _m.Called(subnet) 15 | 16 | var r0 string 17 | var r1 error 18 | if rf, ok := ret.Get(0).(func(string) (string, error)); ok { 19 | return rf(subnet) 20 | } 21 | if rf, ok := ret.Get(0).(func(string) string); ok { 22 | r0 = rf(subnet) 23 | } else { 24 | r0 = ret.Get(0).(string) 25 | } 26 | 27 | if rf, ok := ret.Get(1).(func(string) error); ok { 28 | r1 = rf(subnet) 29 | } else { 30 | r1 = ret.Error(1) 31 | } 32 | 33 | return r0, r1 34 | } 35 | 36 | // FindNetworkAddress provides a mock function with given fields: address 37 | func (_m *NetworkIntf) FindNetworkAddress(address string) (string, error) { 38 | ret := _m.Called(address) 39 | 40 | var r0 string 41 | var r1 error 42 | if rf, ok := ret.Get(0).(func(string) (string, error)); ok { 43 | return rf(address) 44 | } 45 | if rf, ok := ret.Get(0).(func(string) string); ok { 46 | r0 = rf(address) 47 | } else { 48 | r0 = ret.Get(0).(string) 49 | } 50 | 51 | if rf, ok := ret.Get(1).(func(string) error); ok { 52 | r1 = rf(address) 53 | } else { 54 | r1 = ret.Error(1) 55 | } 56 | 57 | return r0, r1 58 | } 59 | 60 | // IsIpOnSubnet provides a mock function with given fields: address, subnet 61 | func (_m *NetworkIntf) IsIpOnSubnet(address string, subnet string) bool { 62 | ret := _m.Called(address, subnet) 63 | 64 | var r0 bool 
65 | if rf, ok := ret.Get(0).(func(string, string) bool); ok { 66 | r0 = rf(address, subnet) 67 | } else { 68 | r0 = ret.Get(0).(bool) 69 | } 70 | 71 | return r0 72 | } 73 | 74 | // NewNetworkIntf creates a new instance of NetworkIntf. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 75 | // The first argument is typically a *testing.T value. 76 | func NewNetworkIntf(t interface { 77 | mock.TestingT 78 | Cleanup(func()) 79 | }) *NetworkIntf { 80 | mock := &NetworkIntf{} 81 | mock.Mock.Test(t) 82 | 83 | t.Cleanup(func() { mock.AssertExpectations(t) }) 84 | 85 | return mock 86 | } 87 | -------------------------------------------------------------------------------- /microceph/mocks/Runner.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery v2.30.10. DO NOT EDIT. 2 | 3 | package mocks 4 | 5 | import ( 6 | context "context" 7 | 8 | mock "github.com/stretchr/testify/mock" 9 | ) 10 | 11 | // Runner is an autogenerated mock type for the Runner type 12 | type Runner struct { 13 | mock.Mock 14 | } 15 | 16 | // RunCommand provides a mock function with given fields: name, arg 17 | func (_m *Runner) RunCommand(name string, arg ...string) (string, error) { 18 | _va := make([]interface{}, len(arg)) 19 | for _i := range arg { 20 | _va[_i] = arg[_i] 21 | } 22 | var _ca []interface{} 23 | _ca = append(_ca, name) 24 | _ca = append(_ca, _va...) 25 | ret := _m.Called(_ca...) 26 | 27 | var r0 string 28 | var r1 error 29 | if rf, ok := ret.Get(0).(func(string, ...string) (string, error)); ok { 30 | return rf(name, arg...) 31 | } 32 | if rf, ok := ret.Get(0).(func(string, ...string) string); ok { 33 | r0 = rf(name, arg...) 34 | } else { 35 | r0 = ret.Get(0).(string) 36 | } 37 | 38 | if rf, ok := ret.Get(1).(func(string, ...string) error); ok { 39 | r1 = rf(name, arg...) 
40 | } else { 41 | r1 = ret.Error(1) 42 | } 43 | 44 | return r0, r1 45 | } 46 | 47 | // RunCommandContext provides a mock function with given fields: ctx, name, arg 48 | func (_m *Runner) RunCommandContext(ctx context.Context, name string, arg ...string) (string, error) { 49 | _va := make([]interface{}, len(arg)) 50 | for _i := range arg { 51 | _va[_i] = arg[_i] 52 | } 53 | var _ca []interface{} 54 | _ca = append(_ca, ctx, name) 55 | _ca = append(_ca, _va...) 56 | ret := _m.Called(_ca...) 57 | 58 | var r0 string 59 | var r1 error 60 | if rf, ok := ret.Get(0).(func(context.Context, string, ...string) (string, error)); ok { 61 | return rf(ctx, name, arg...) 62 | } 63 | if rf, ok := ret.Get(0).(func(context.Context, string, ...string) string); ok { 64 | r0 = rf(ctx, name, arg...) 65 | } else { 66 | r0 = ret.Get(0).(string) 67 | } 68 | 69 | if rf, ok := ret.Get(1).(func(context.Context, string, ...string) error); ok { 70 | r1 = rf(ctx, name, arg...) 71 | } else { 72 | r1 = ret.Error(1) 73 | } 74 | 75 | return r0, r1 76 | } 77 | 78 | // NewRunner creates a new instance of Runner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 79 | // The first argument is typically a *testing.T value. 80 | func NewRunner(t interface { 81 | mock.TestingT 82 | Cleanup(func()) 83 | }) *Runner { 84 | mock := &Runner{} 85 | mock.Mock.Test(t) 86 | 87 | t.Cleanup(func() { mock.AssertExpectations(t) }) 88 | 89 | return mock 90 | } 91 | -------------------------------------------------------------------------------- /microceph/mocks/StateInterface.go: -------------------------------------------------------------------------------- 1 | // Package mocks cluster state interface. 
Generated by mockery with a minor update as mockery confuses import paths 2 | package mocks 3 | 4 | import ( 5 | state "github.com/canonical/microcluster/v2/state" // mockery gets confused about import paths here 6 | mock "github.com/stretchr/testify/mock" 7 | ) 8 | 9 | // StateInterface is an autogenerated mock type for the StateInterface type 10 | type StateInterface struct { 11 | mock.Mock 12 | } 13 | 14 | // ClusterState provides a mock function with given fields: 15 | func (_m *StateInterface) ClusterState() state.State { 16 | ret := _m.Called() 17 | 18 | var r0 state.State 19 | if rf, ok := ret.Get(0).(func() state.State); ok { 20 | r0 = rf() 21 | } else { 22 | if ret.Get(0) != nil { 23 | r0 = ret.Get(0).(state.State) 24 | } 25 | } 26 | 27 | return r0 28 | } 29 | 30 | // NewStateInterface creates a new instance of StateInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 31 | // The first argument is typically a *testing.T value. 
32 | func NewStateInterface(t interface { 33 | mock.TestingT 34 | Cleanup(func()) 35 | }) *StateInterface { 36 | mock := &StateInterface{} 37 | mock.Mock.Test(t) 38 | 39 | t.Cleanup(func() { mock.AssertExpectations(t) }) 40 | 41 | return mock 42 | } 43 | -------------------------------------------------------------------------------- /microceph/tests/testdata/ceph.client.admin.keyring: -------------------------------------------------------------------------------- 1 | [client.admin] 2 | key = AQxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx== 3 | -------------------------------------------------------------------------------- /microceph/tests/testdata/ceph.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | run dir = /var/snap/microceph/x1/run 3 | fsid = bf53b0d6-8b5b-4d88-af2b-2fd0feceba8b 4 | mon host = 10.0.1.2 5 | auth allow insecure global id reclaim = false 6 | public addr = 10.0.1.2 7 | -------------------------------------------------------------------------------- /microceph/tests/testutils.go: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import ( 4 | "os" 5 | "path" 6 | "path/filepath" 7 | "runtime" 8 | 9 | "github.com/stretchr/testify/mock" 10 | "github.com/stretchr/testify/suite" 11 | ) 12 | 13 | type BaseSuite struct { 14 | suite.Suite 15 | Tmp string 16 | } 17 | 18 | // CreateTmp creates a temporary directory for the test 19 | func (s *BaseSuite) CreateTmp() { 20 | var err error 21 | s.Tmp, err = os.MkdirTemp("", "microceph-test") 22 | if err != nil { 23 | s.T().Fatal("error creating Tmp:", err) 24 | } 25 | } 26 | 27 | // copyCephConfigs copies a test config file to the test directory 28 | func (s *BaseSuite) CopyCephConfigs() { 29 | var err error 30 | 31 | for _, d := range []string{"SNAP_DATA", "SNAP_COMMON"} { 32 | p := filepath.Join(s.Tmp, d) 33 | err = os.MkdirAll(p, 0770) 34 | if err != nil { 35 | s.T().Fatal("error creating dir:", err) 36 | 
// CopyTestConf copies the named config file from this package's
// tests/testdata directory into dir's SNAP_DATA/conf subdirectory.
// The destination directory must already exist (see CopyCephConfigs).
// It returns any error encountered while reading or writing the file.
func CopyTestConf(dir string, conf string) error {
	// Locate this source file at runtime so testdata is found
	// regardless of the working directory the tests run from.
	_, tfile, _, _ := runtime.Caller(0)
	pkgDir := path.Join(path.Dir(tfile), "..")

	source, err := os.ReadFile(filepath.Join(pkgDir, "tests", "testdata", conf))
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "SNAP_DATA", "conf", conf), source, 0640)
}
# Block until microcephd has rendered a usable ceph.conf, polling for up
# to roughly ten minutes (300 attempts, 2s apart). Returns 0 once the
# file contains a "run dir" entry, 1 if it never appears.
wait_for_config() {
    local confpath="${SNAP_DATA}/conf/ceph.conf"
    local search_str="^run dir = "
    local attempt=0
    local max_attempts=300

    sleep 1 # give microcephd startup a headstart
    until [ "${attempt}" -ge "${max_attempts}" ]; do
        if [ -f "${confpath}" ] && grep -q "${search_str}" "${confpath}"; then
            return 0
        fi
        attempt=$((attempt + 1))
        sleep 2
    done

    echo "No conf found in ${confpath}"
    return 1
}

# Raise per-process limits for open files (NOFILE) and processes/threads
# (NPROC), which Ceph daemons need well above typical defaults.
limits() {
    ulimit -n 1048576
    ulimit -u 1048576
}
#!/bin/bash
# Launch the Ceph metadata server (MDS) daemon for this node.

# Shared helpers: wait_for_config, limits.
. "${SNAP}/commands/common"

# Raise NOFILE/NPROC limits before starting the daemon.
limits

# Block until microcephd has written a usable ceph.conf.
wait_for_config

# -f keeps the daemon in the foreground so the service manager can
# supervise it; the daemon id is this node's hostname.
exec ceph-mds -f --cluster ceph --id "$(hostname)"
#!/bin/bash
# Ask the running ceph-osd supervisor (osd.start) to respawn its OSDs by
# sending SIGHUP to the PID it recorded at service startup.
set -eu

# shellcheck disable=SC2155
export SNAP_CURRENT="$(realpath "${SNAP_DATA}/..")/current"

pidfile="${SNAP_CURRENT}/run/ceph-osd.pid"

# Fail with a clear diagnostic instead of an opaque cat/kill error when
# the osd service has never started (no pidfile) or wrote an empty one.
if [ ! -s "${pidfile}" ]; then
    echo "osd.reload: no PID recorded in ${pidfile}; is the osd service running?" >&2
    exit 1
fi

PID=$(cat "${pidfile}")
kill -HUP "${PID}"
exit 0
#!/bin/bash
# Launch the RADOS Gateway (RGW) daemon using the rendered radosgw.conf.

. "${SNAP}/commands/common"

limits

wait_for_config

conf="${SNAP_DATA}/conf/radosgw.conf"

# Bug fix: the previous guard was `[ -n "${conf}" ]`, which tests the
# (always non-empty) path string, so it could never be false. The intent
# is to check that the rendered config file exists. Fail loudly when it
# is missing rather than exiting 0 without having started RGW.
if [ -f "${conf}" ] ; then
    exec radosgw -f --cluster ceph --name "client.radosgw.gateway" -c "${conf}"
fi

echo "rgw.start: ${conf} not found; has the RGW service been enabled?" >&2
exit 1