├── .deepsource.toml
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
└── workflows
│ ├── ansible-lint.yml
│ ├── check-nbsp.yml
│ ├── defaults.yml
│ ├── flake8.yml
│ ├── pytest.yml
│ ├── signed-off.yml
│ └── stale.yml
├── .gitignore
├── .mergify.yml
├── .readthedocs.yaml
├── CONTRIBUTING.md
├── LICENSE
├── Makefile
├── README.rst
├── Vagrantfile
├── ansible.cfg
├── ceph-ansible.spec.in
├── contrib
├── backport_to_stable_branch.sh
├── push-roles-to-ansible-galaxy.sh
├── rundep.sample
├── rundep_installer.sh
├── snapshot_vms.sh
├── vagrant_variables.yml.atomic
├── vagrant_variables.yml.linode
└── vagrant_variables.yml.openstack
├── dashboard.yml
├── docs
├── .gitignore
├── Makefile
├── source
│ ├── _static
│ │ └── .empty
│ ├── _templates
│ │ └── .empty
│ ├── conf.py
│ ├── day-2
│ │ ├── osds.rst
│ │ ├── purge.rst
│ │ └── upgrade.rst
│ ├── dev
│ │ └── index.rst
│ ├── glossary.rst
│ ├── index.rst
│ ├── installation
│ │ ├── containerized.rst
│ │ ├── methods.rst
│ │ └── non-containerized.rst
│ ├── osds
│ │ └── scenarios.rst
│ ├── rbdmirror
│ │ └── index.rst
│ └── testing
│ │ ├── development.rst
│ │ ├── glossary.rst
│ │ ├── index.rst
│ │ ├── layout.rst
│ │ ├── modifying.rst
│ │ ├── running.rst
│ │ ├── scenarios.rst
│ │ ├── tests.rst
│ │ └── tox.rst
└── tox.ini
├── dummy-ansible-hosts
├── generate_group_vars_sample.sh
├── group_vars
├── all.yml.sample
├── clients.yml.sample
├── exporters.yml.sample
├── mdss.yml.sample
├── mgrs.yml.sample
├── mons.yml.sample
├── nfss.yml.sample
├── osds.yml.sample
├── rbdmirrors.yml.sample
├── rgwloadbalancers.yml.sample
└── rgws.yml.sample
├── infrastructure-playbooks
├── README.md
├── add-mon.yml
├── backup-and-restore-ceph-files.yml
├── ceph-keys.yml
├── cephadm-adopt.yml
├── cephadm.yml
├── docker-to-podman.yml
├── gather-ceph-logs.yml
├── lv-create.yml
├── lv-teardown.yml
├── purge-cluster.yml
├── purge-container-cluster.yml
├── purge-dashboard.yml
├── rgw-add-users-buckets.yml
├── rolling_update.yml
├── shrink-mds.yml
├── shrink-mgr.yml
├── shrink-mon.yml
├── shrink-osd.yml
├── shrink-rbdmirror.yml
├── shrink-rgw.yml
├── storage-inventory.yml
├── switch-from-non-containerized-to-containerized-ceph-daemons.yml
├── take-over-existing-cluster.yml
├── untested-by-ci
│ ├── cluster-maintenance.yml
│ ├── cluster-os-migration.yml
│ ├── make-osd-partitions.yml
│ ├── migrate-journal-to-ssd.yml
│ ├── purge-multisite.yml
│ ├── recover-osds-after-ssd-journal-failure.yml
│ └── replace-osd.yml
└── vars
│ └── lv_vars.yaml.sample
├── library
├── __init__.py
├── ceph_add_users_buckets.py
├── ceph_authtool.py
├── ceph_config.py
├── ceph_crush.py
├── ceph_crush_rule.py
├── ceph_crush_rule_info.py
├── ceph_dashboard_user.py
├── ceph_ec_profile.py
├── ceph_fs.py
├── ceph_key.py
├── ceph_key_info.py
├── ceph_mgr_module.py
├── ceph_orch_apply.py
├── ceph_osd.py
├── ceph_osd_flag.py
├── ceph_pool.py
├── ceph_volume.py
├── ceph_volume_simple_activate.py
├── ceph_volume_simple_scan.py
├── cephadm_adopt.py
├── cephadm_bootstrap.py
├── radosgw_caps.py
├── radosgw_realm.py
├── radosgw_user.py
├── radosgw_zone.py
└── radosgw_zonegroup.py
├── module_utils
├── __init__.py
└── ca_common.py
├── plugins
├── callback
│ └── installer_checkpoint.py
└── filter
│ ├── __init__.py
│ ├── dict2dict.py
│ └── ipaddrs_in_ranges.py
├── profiles
├── rgw-keystone-v2
├── rgw-keystone-v3
├── rgw-radosgw-static-website
└── rgw-usage-log
├── raw_install_python.yml
├── requirements.txt
├── requirements.yml
├── roles
├── ceph-client
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── create_users_keys.yml
│ │ ├── main.yml
│ │ └── pre_requisite.yml
├── ceph-common
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── cephstable.asc
│ │ └── cephstablerhcs.asc
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── configure_cluster_name.yml
│ │ ├── configure_memory_allocator.yml
│ │ ├── configure_repository.yml
│ │ ├── create_rbd_client_dir.yml
│ │ ├── installs
│ │ │ ├── configure_debian_repository_installation.yml
│ │ │ ├── configure_redhat_local_installation.yml
│ │ │ ├── configure_redhat_repository_installation.yml
│ │ │ ├── configure_suse_repository_installation.yml
│ │ │ ├── debian_community_repository.yml
│ │ │ ├── debian_custom_repository.yml
│ │ │ ├── debian_dev_repository.yml
│ │ │ ├── debian_uca_repository.yml
│ │ │ ├── install_debian_packages.yml
│ │ │ ├── install_on_clear.yml
│ │ │ ├── install_on_debian.yml
│ │ │ ├── install_redhat_packages.yml
│ │ │ ├── install_suse_packages.yml
│ │ │ ├── redhat_community_repository.yml
│ │ │ ├── redhat_custom_repository.yml
│ │ │ ├── redhat_dev_repository.yml
│ │ │ └── suse_obs_repository.yml
│ │ ├── main.yml
│ │ └── selinux.yml
│ └── vars
│ │ └── main.yml
├── ceph-config
│ ├── LICENSE
│ ├── README.md
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── create_ceph_initial_dirs.yml
│ │ ├── main.yml
│ │ └── rgw_systemd_environment_file.yml
│ └── templates
│ │ └── ceph.conf.j2
├── ceph-container-common
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ceph.target
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── fetch_image.yml
│ │ ├── main.yml
│ │ ├── prerequisites.yml
│ │ ├── registry.yml
│ │ └── release.yml
├── ceph-container-engine
│ ├── README.md
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ └── pre_requisites
│ │ │ ├── debian_prerequisites.yml
│ │ │ └── prerequisites.yml
│ ├── templates
│ │ └── docker-proxy.conf.j2
│ └── vars
│ │ ├── CentOS-8.yml
│ │ ├── CentOS-9.yml
│ │ ├── Debian.yml
│ │ ├── RedHat-8.yml
│ │ ├── RedHat.yml
│ │ └── Ubuntu.yml
├── ceph-crash
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ └── systemd.yml
│ └── templates
│ │ └── ceph-crash.service.j2
├── ceph-dashboard
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── configure_dashboard.yml
│ │ ├── configure_grafana_layouts.yml
│ │ └── main.yml
├── ceph-defaults
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── vars
│ │ └── main.yml
├── ceph-exporter
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ └── systemd.yml
│ └── templates
│ │ └── ceph-exporter.service.j2
├── ceph-facts
│ ├── README.md
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── container_binary.yml
│ │ ├── devices.yml
│ │ ├── facts.yml
│ │ ├── get_def_crush_rule_name.yml
│ │ ├── grafana.yml
│ │ ├── main.yml
│ │ ├── set_monitor_address.yml
│ │ └── set_radosgw_address.yml
├── ceph-fetch-keys
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── ceph-grafana
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── configure_grafana.yml
│ │ ├── main.yml
│ │ ├── setup_container.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── dashboards-ceph-dashboard.yml.j2
│ │ ├── datasources-ceph-dashboard.yml.j2
│ │ ├── grafana-server.service.j2
│ │ └── grafana.ini.j2
├── ceph-handler
│ ├── LICENSE
│ ├── README.md
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── check_running_cluster.yml
│ │ ├── check_running_containers.yml
│ │ ├── check_socket_non_container.yml
│ │ ├── handler_crash.yml
│ │ ├── handler_exporter.yml
│ │ ├── handler_mdss.yml
│ │ ├── handler_mgrs.yml
│ │ ├── handler_mons.yml
│ │ ├── handler_nfss.yml
│ │ ├── handler_osds.yml
│ │ ├── handler_rbdmirrors.yml
│ │ ├── handler_rgws.yml
│ │ └── main.yml
│ └── templates
│ │ ├── restart_mds_daemon.sh.j2
│ │ ├── restart_mgr_daemon.sh.j2
│ │ ├── restart_mon_daemon.sh.j2
│ │ ├── restart_nfs_daemon.sh.j2
│ │ ├── restart_osd_daemon.sh.j2
│ │ ├── restart_rbd_mirror_daemon.sh.j2
│ │ └── restart_rgw_daemon.sh.j2
├── ceph-infra
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── configure_firewall.yml
│ │ ├── dashboard_firewall.yml
│ │ ├── main.yml
│ │ └── setup_ntp.yml
│ └── templates
│ │ └── logrotate.conf.j2
├── ceph-mds
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ceph-mds.target
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── common.yml
│ │ ├── containerized.yml
│ │ ├── create_mds_filesystems.yml
│ │ ├── main.yml
│ │ ├── non_containerized.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── ceph-mds.service.d-overrides.j2
│ │ └── ceph-mds.service.j2
├── ceph-mgr
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ceph-mgr.target
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── common.yml
│ │ ├── main.yml
│ │ ├── mgr_modules.yml
│ │ ├── pre_requisite.yml
│ │ ├── start_mgr.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── ceph-mgr.service.d-overrides.j2
│ │ └── ceph-mgr.service.j2
├── ceph-mon
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ceph-mon.target
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── ceph_keys.yml
│ │ ├── deploy_monitors.yml
│ │ ├── main.yml
│ │ ├── secure_cluster.yml
│ │ ├── start_monitor.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── ceph-mon.service.d-overrides.j2
│ │ └── ceph-mon.service.j2
├── ceph-nfs
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── create_rgw_nfs_user.yml
│ │ ├── main.yml
│ │ ├── pre_requisite_container.yml
│ │ ├── pre_requisite_non_container.yml
│ │ ├── pre_requisite_non_container_debian.yml
│ │ ├── pre_requisite_non_container_red_hat.yml
│ │ ├── start_nfs.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── ceph-nfs.service.j2
│ │ ├── ganesha.conf.j2
│ │ ├── idmap.conf.j2
│ │ └── systemd-run.j2
├── ceph-node-exporter
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── setup_container.yml
│ │ └── systemd.yml
│ └── templates
│ │ └── node_exporter.service.j2
├── ceph-osd
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ceph-osd.target
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── common.yml
│ │ ├── crush_rules.yml
│ │ ├── main.yml
│ │ ├── scenarios
│ │ │ ├── lvm-batch.yml
│ │ │ └── lvm.yml
│ │ ├── start_osds.yml
│ │ ├── system_tuning.yml
│ │ └── systemd.yml
│ ├── templates
│ │ ├── ceph-osd.service.d-overrides.j2
│ │ ├── ceph-osd.service.j2
│ │ ├── systemd-run.j2
│ │ └── tmpfiles_hugepage.j2
│ └── vars
│ │ └── main.yml
├── ceph-prometheus
│ ├── files
│ │ └── ceph_dashboard.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── setup_container.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── alertmanager.service.j2
│ │ ├── alertmanager.yml.j2
│ │ ├── prometheus.service.j2
│ │ └── prometheus.yml.j2
├── ceph-rbd-mirror
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ceph-rbd-mirror.target
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── configure_mirroring.yml
│ │ ├── main.yml
│ │ ├── start_container_rbd_mirror.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── ceph-rbd-mirror.service.d-overrides.j2
│ │ └── ceph-rbd-mirror.service.j2
├── ceph-rgw-loadbalancer
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── pre_requisite.yml
│ │ └── start_rgw_loadbalancer.yml
│ └── templates
│ │ ├── haproxy.cfg.j2
│ │ └── keepalived.conf.j2
├── ceph-rgw
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ceph-radosgw.target
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── common.yml
│ │ ├── main.yml
│ │ ├── openstack-keystone.yml
│ │ ├── pre_requisite.yml
│ │ ├── rgw_create_pools.yml
│ │ ├── start_docker_rgw.yml
│ │ ├── start_radosgw.yml
│ │ └── systemd.yml
│ └── templates
│ │ ├── ceph-radosgw.service.j2
│ │ └── ceph-rgw.service.d-overrides.j2
└── ceph-validate
│ ├── meta
│ └── main.yml
│ └── tasks
│ ├── check_devices.yml
│ ├── check_eth_rgw.yml
│ ├── check_ipaddr_mon.yml
│ ├── check_nfs.yml
│ ├── check_pools.yml
│ ├── check_rbdmirror.yml
│ ├── check_repository.yml
│ ├── check_rgw_pools.yml
│ ├── check_system.yml
│ └── main.yml
├── site-container.yml.sample
├── site.yml.sample
├── test.yml
├── tests
├── README.md
├── README.rst
├── conftest.py
├── functional
│ ├── .gitignore
│ ├── add-mdss
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ ├── hosts-2
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ ├── hosts-2
│ │ └── vagrant_variables.yml
│ ├── add-mgrs
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ ├── hosts-2
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ ├── hosts-2
│ │ └── vagrant_variables.yml
│ ├── add-mons
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ ├── hosts-2
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ ├── hosts-2
│ │ └── vagrant_variables.yml
│ ├── add-osds
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ ├── hosts-2
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ ├── hosts-2
│ │ └── vagrant_variables.yml
│ ├── add-rbdmirrors
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ ├── hosts-2
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ ├── hosts-2
│ │ └── vagrant_variables.yml
│ ├── add-rgws
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ ├── hosts-2
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ └── rgws
│ │ ├── hosts
│ │ ├── hosts-2
│ │ └── vagrant_variables.yml
│ ├── all-in-one
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── all_daemons
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── clients
│ │ │ │ ├── iscsigws
│ │ │ │ ├── mons
│ │ │ │ ├── osds
│ │ │ │ └── rgws
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── clients
│ │ │ ├── iscsigws
│ │ │ ├── mons
│ │ │ ├── nfss
│ │ │ ├── osds
│ │ │ └── rgws
│ │ ├── hosts
│ │ ├── hosts-switch-to-containers
│ │ └── vagrant_variables.yml
│ ├── all_daemons_ipv6
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── clients
│ │ │ │ ├── iscsigws
│ │ │ │ ├── mons
│ │ │ │ ├── osds
│ │ │ │ └── rgws
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── clients
│ │ │ ├── iscsigws
│ │ │ ├── mons
│ │ │ ├── nfss
│ │ │ ├── osds
│ │ │ └── rgws
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── cephadm
│ │ ├── Vagrantfile
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── collect-logs.yml
│ ├── collocation
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── clients
│ │ │ │ ├── osds
│ │ │ │ └── rgws
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── clients
│ │ │ ├── osds
│ │ │ └── rgws
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── dev_setup.yml
│ ├── docker2podman
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── clients
│ │ │ ├── iscsigws
│ │ │ ├── mons
│ │ │ ├── osds
│ │ │ └── rgws
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── external_clients
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── inventory
│ │ │ │ ├── external_clients-hosts
│ │ │ │ ├── group_vars
│ │ │ │ │ ├── all
│ │ │ │ │ └── clients
│ │ │ │ └── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── inventory
│ │ │ ├── external_clients-hosts
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ └── clients
│ │ │ └── hosts
│ │ └── vagrant_variables.yml
│ ├── external_clients_admin_key.yml
│ ├── infra_lv_create
│ │ ├── Vagrantfile
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── lvm-auto-discovery
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── lvm-batch
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── lvm-osds
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── lvm_setup.yml
│ ├── migrate_ceph_disk_to_ceph_volume
│ │ ├── Vagrantfile
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── podman
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── clients
│ │ │ ├── iscsigws
│ │ │ ├── mons
│ │ │ ├── osds
│ │ │ └── rgws
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── rbd_map_devices.yml
│ ├── rbdmirror.yml
│ ├── rbdmirror
│ │ ├── Vagrantfile
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ ├── secondary
│ │ │ │ ├── Vagrantfile
│ │ │ │ ├── group_vars
│ │ │ │ │ └── all
│ │ │ │ ├── hosts
│ │ │ │ └── vagrant_variables.yml
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ └── all
│ │ ├── hosts
│ │ ├── secondary
│ │ │ ├── Vagrantfile
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ └── vagrant_variables.yml
│ ├── reboot.yml
│ ├── setup.yml
│ ├── shrink_mds
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── mons
│ │ │ │ └── osds
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── mons
│ │ │ └── osds
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── shrink_mgr
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── mons
│ │ │ │ └── osds
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── mons
│ │ │ └── osds
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── shrink_mon
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── mons
│ │ │ │ └── osds
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── mons
│ │ │ └── osds
│ │ ├── hosts
│ │ ├── hosts-switch-to-containers
│ │ └── vagrant_variables.yml
│ ├── shrink_osd
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ └── osds
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── shrink_rbdmirror
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── mons
│ │ │ │ └── osds
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── mons
│ │ │ └── osds
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── shrink_rgw
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── mons
│ │ │ │ ├── osds
│ │ │ │ └── rgws
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── mons
│ │ │ ├── osds
│ │ │ └── rgws
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ ├── subset_update
│ │ ├── Vagrantfile
│ │ ├── ceph-override.json
│ │ ├── container
│ │ │ ├── Vagrantfile
│ │ │ ├── ceph-override.json
│ │ │ ├── group_vars
│ │ │ │ ├── all
│ │ │ │ ├── clients
│ │ │ │ ├── iscsigws
│ │ │ │ ├── mons
│ │ │ │ ├── osds
│ │ │ │ └── rgws
│ │ │ ├── hosts
│ │ │ └── vagrant_variables.yml
│ │ ├── group_vars
│ │ │ ├── all
│ │ │ ├── clients
│ │ │ ├── iscsigws
│ │ │ ├── mons
│ │ │ ├── nfss
│ │ │ ├── osds
│ │ │ └── rgws
│ │ ├── hosts
│ │ └── vagrant_variables.yml
│ └── tests
│ │ ├── __init__.py
│ │ ├── grafana
│ │ └── test_grafana.py
│ │ ├── mds
│ │ ├── __init__.py
│ │ └── test_mds.py
│ │ ├── mgr
│ │ ├── __init__.py
│ │ └── test_mgr.py
│ │ ├── mon
│ │ ├── __init__.py
│ │ └── test_mons.py
│ │ ├── nfs
│ │ └── test_nfs_ganesha.py
│ │ ├── node-exporter
│ │ └── test_node_exporter.py
│ │ ├── osd
│ │ ├── __init__.py
│ │ └── test_osds.py
│ │ ├── rbd-mirror
│ │ ├── __init__.py
│ │ └── test_rbd_mirror.py
│ │ ├── rgw
│ │ ├── __init__.py
│ │ └── test_rgw.py
│ │ └── test_install.py
├── inventories
│ └── single-machine.yml
├── library
│ ├── ca_test_common.py
│ ├── test_ceph_crush.py
│ ├── test_ceph_crush_rule.py
│ ├── test_ceph_dashboard_user.py
│ ├── test_ceph_ec_profile.py
│ ├── test_ceph_fs.py
│ ├── test_ceph_key.py
│ ├── test_ceph_mgr_module.py
│ ├── test_ceph_osd.py
│ ├── test_ceph_osd_flag.py
│ ├── test_ceph_pool.py
│ ├── test_ceph_volume.py
│ ├── test_ceph_volume_simple_activate.py
│ ├── test_ceph_volume_simple_scan.py
│ ├── test_cephadm_adopt.py
│ ├── test_cephadm_bootstrap.py
│ ├── test_radosgw_caps.py
│ ├── test_radosgw_realm.py
│ ├── test_radosgw_user.py
│ ├── test_radosgw_zone.py
│ └── test_radosgw_zonegroup.py
├── module_utils
│ └── test_ca_common.py
├── plugins
│ └── filter
│ │ └── test_ipaddrs_in_ranges.py
├── pytest.ini
├── requirements.txt
└── scripts
│ ├── generate_ssh_config.sh
│ ├── vagrant_up.sh
│ └── workflows
│ ├── defaults.sh
│ └── signed-off.sh
├── tox-cephadm.ini
├── tox-docker2podman.ini
├── tox-external_clients.ini
├── tox-podman.ini
├── tox-rbdmirror.ini
├── tox-shrink_osd.ini
├── tox-subset_update.ini
├── tox-update.ini
├── tox.ini
└── vagrant_variables.yml.sample
/.deepsource.toml:
--------------------------------------------------------------------------------
1 | version = 1
2 |
3 | test_patterns = ["tests/**"]
4 |
5 | exclude_patterns = [
6 | "roles/**",
7 | "profiles/**",
8 | "infrastructure-playbooks/**",
9 | "group_vars/**",
10 | "contrib/**"
11 | ]
12 |
13 | [[analyzers]]
14 | name = "python"
15 | enabled = true
16 |
17 | [analyzers.meta]
18 | runtime_version = "3.x.x"
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 |
5 | ---
6 |
7 |
10 |
11 |
12 | **Bug Report**
13 |
14 | What happened:
15 |
16 | What you expected to happen:
17 |
18 | How to reproduce it (minimal and precise):
19 |
20 |
21 | Share your group_vars files, inventory and **full** ceph-ansibe log
22 |
23 | **Environment**:
24 | * OS (e.g. from /etc/os-release):
25 | * Kernel (e.g. `uname -a`):
26 | * Docker version if applicable (e.g. `docker version`):
27 | * Ansible version (e.g. `ansible-playbook --version`):
28 | * ceph-ansible version (e.g. `git head or tag or stable branch`):
29 | * Ceph version (e.g. `ceph -v`):
30 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 |
5 | ---
6 |
7 | **Is your feature request related to a problem? Please describe.**
8 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
9 |
10 | **Describe the solution you'd like**
11 | A clear and concise description of what you want to happen.
12 |
13 | **Describe alternatives you've considered**
14 | A clear and concise description of any alternative solutions or features you've considered.
15 |
16 | **Additional context**
17 | Add any other context or screenshots about the feature request here.
18 |
--------------------------------------------------------------------------------
/.github/workflows/ansible-lint.yml:
--------------------------------------------------------------------------------
1 | name: ansible-lint
2 | on: [pull_request]
3 | jobs:
4 | build:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v2
8 | - name: Setup python
9 | uses: actions/setup-python@v2
10 | with:
11 | python-version: '3.10'
12 | architecture: x64
13 | - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint netaddr
14 | - run: ansible-galaxy install -r requirements.yml
15 | - run: ansible-lint -x 'yaml[line-length],role-name,run-once' -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site.yml.sample dashboard.yml
16 | - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample site-container.yml.sample dashboard.yml infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
17 |
--------------------------------------------------------------------------------
/.github/workflows/check-nbsp.yml:
--------------------------------------------------------------------------------
1 | name: check-nbsp
2 | on: [pull_request]
3 | jobs:
4 | build:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v2
8 | - run: if [[ -n $(grep --exclude-dir=.git -P "\xa0" -r .) ]]; then echo 'NBSP characters found'; exit 1; fi
9 |
--------------------------------------------------------------------------------
/.github/workflows/defaults.yml:
--------------------------------------------------------------------------------
1 | name: defaults
2 | on: [pull_request]
3 | jobs:
4 | build:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v2
8 | with:
9 | fetch-depth: 0
10 | - run: "${GITHUB_WORKSPACE}/tests/scripts/workflows/defaults.sh"
--------------------------------------------------------------------------------
/.github/workflows/flake8.yml:
--------------------------------------------------------------------------------
1 | name: flake8
2 | on:
3 | pull_request:
4 | paths:
5 | - 'library/**.py'
6 | - 'module_utils/**.py'
7 | - 'plugins/filter/**.py'
8 | - 'tests/conftest.py'
9 | - 'tests/library/**.py'
10 | - 'tests/module_utils/**.py'
11 | - 'tests/plugins/filter/**.py'
12 | - 'tests/functional/tests/**.py'
13 | jobs:
14 | build:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - uses: actions/checkout@v2
18 | - name: Setup python
19 | uses: actions/setup-python@v2
20 | with:
21 | python-version: '3.10'
22 | architecture: x64
23 | - run: pip install flake8
24 | - run: flake8 --max-line-length 160 ./library/ ./module_utils/ ./plugins/filter/ ./tests/library/ ./tests/module_utils/ ./tests/plugins/filter/ ./tests/conftest.py ./tests/functional/tests/
25 |
--------------------------------------------------------------------------------
/.github/workflows/signed-off.yml:
--------------------------------------------------------------------------------
1 | name: signed-off
2 | on: [pull_request]
3 | jobs:
4 | build:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v2
8 | with:
9 | fetch-depth: 0
10 | - run: "${GITHUB_WORKSPACE}/tests/scripts/workflows/signed-off.sh"
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | *.vdi
3 | *.keyring
4 | fetch/*
5 | /vagrant_variables.yml
6 | group_vars/all
7 | group_vars/mons
8 | group_vars/osds
9 | group_vars/mdss
10 | group_vars/rgws
11 | group_vars/*.yml
12 | *.DS_Store
13 | /*.yml
14 | *.pyc
15 | *.sw?
16 | .tox
17 | ceph-ansible.spec
18 | *.retry
19 | *.pytest_cache
20 | !.travis.yml
21 | !.mergify.yml
22 | !raw_install_python.yml
23 | !requirements.yml
24 | .vscode/
25 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | build:
4 | os: "ubuntu-22.04"
5 | tools:
6 | python: "3.9"
7 |
8 | sphinx:
9 | # Path to your Sphinx configuration file.
10 | configuration: docs/source/conf.py
11 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Ceph Ansible
2 | ==============
3 |
4 | The project is still maintained for the time being but it is encouraged to migrate to `cephadm `_.
5 |
6 | Ansible playbooks for Ceph, the distributed object, block, and file storage platform.
7 |
8 | Please refer to our hosted documentation here: https://docs.ceph.com/projects/ceph-ansible/en/latest/
9 | You can view documentation for our ``stable-*`` branches by substituting ``main`` in the link
10 | above for the name of the branch. For example: https://docs.ceph.com/projects/ceph-ansible/en/stable-8.0/
11 |
--------------------------------------------------------------------------------
/contrib/rundep.sample:
--------------------------------------------------------------------------------
1 | #Package lines can be commented out with '#'
2 | #
3 | #boost-atomic
4 | #boost-chrono
5 | #boost-date-time
6 | #boost-iostreams
7 | #boost-program
8 | #boost-random
9 | #boost-regex
10 | #boost-system
11 | #boost-thread
12 | #bzip2-libs
13 | #cyrus-sasl-lib
14 | #expat
15 | #fcgi
16 | #fuse-libs
17 | #glibc
18 | #keyutils-libs
19 | #leveldb
20 | #libaio
21 | #libatomic_ops
22 | #libattr
23 | #libblkid
24 | #libcap
25 | #libcom_err
26 | #libcurl
27 | #libgcc
28 | #libicu
29 | #libidn
30 | #libnghttp2
31 | #libpsl
32 | #libselinux
33 | #libssh2
34 | #libstdc++
35 | #libunistring
36 | #nss-softokn-freebl
37 | #openldap
38 | #openssl-libs
39 | #pcre
40 | #python-nose
41 | #python-sphinx
42 | #snappy
43 | #systemd-libs
44 | #zlib
45 |
--------------------------------------------------------------------------------
/contrib/rundep_installer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | #
3 | # Copyright (C) 2014, 2015 Red Hat
4 | #
5 | # Author: Daniel Lin
6 | #
7 | # This library is free software; you can redistribute it and/or
8 | # modify it under the terms of the GNU Lesser General Public
9 | # License as published by the Free Software Foundation; either
10 | # version 2.1 of the License, or (at your option) any later version.
11 | #
12 |
13 | if test -f /etc/redhat-release ; then
14 | PACKAGE_INSTALLER=yum
15 | elif type apt-get > /dev/null 2>&1 ; then
16 | PACKAGE_INSTALLER=apt-get
17 | else
18 | echo "ERROR: Package Installer could not be determined"
19 | exit 1
20 | fi
21 |
22 | while read p; do
23 | if [[ $p =~ ^#.* ]] ; then
24 | continue
25 | fi
26 | $PACKAGE_INSTALLER install $p -y
27 | done < $1
28 |
--------------------------------------------------------------------------------
/contrib/vagrant_variables.yml.atomic:
--------------------------------------------------------------------------------
1 | ---
2 | # DEPLOY CONTAINERIZED DAEMONS
3 | docker: true
4 |
5 | # DEFINE THE NUMBER OF VMS TO RUN
6 | mon_vms: 1
7 | osd_vms: 1
8 | mds_vms: 0
9 | rgw_vms: 0
10 | nfs_vms: 0
11 | rbd_mirror_vms: 0
12 | client_vms: 0
13 | mgr_vms: 0
14 |
15 | # SUBNETS TO USE FOR THE VMS
16 | public_subnet: 192.168.0
17 | cluster_subnet: 192.168.1
18 |
19 | # MEMORY
20 | memory: 1024
21 |
22 | disks: [ '/dev/sda', '/dev/sdb' ]
23 |
24 | eth: 'enp0s8'
25 | vagrant_box: centos/atomic-host
26 | # The sync directory changes based on vagrant box
27 | # Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
28 | vagrant_sync_dir: /home/vagrant/sync
29 |
30 | skip_tags: 'with_pkg'
31 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = ceph-ansible
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/source/_static/.empty:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/docs/source/_static/.empty
--------------------------------------------------------------------------------
/docs/source/_templates/.empty:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/docs/source/_templates/.empty
--------------------------------------------------------------------------------
/docs/source/day-2/purge.rst:
--------------------------------------------------------------------------------
1 | Purging the cluster
2 | -------------------
3 |
4 | ceph-ansible provides two playbooks in ``infrastructure-playbooks`` for purging a Ceph cluster: ``purge-cluster.yml`` and ``purge-container-cluster.yml``.
5 |
6 | The names are pretty self-explanatory, ``purge-cluster.yml`` is intended to purge a non-containerized cluster whereas ``purge-container-cluster.yml`` is to purge a containerized cluster.
7 |
8 | example:
9 |
10 | .. code-block:: shell
11 |
12 | $ ansible-playbook -vv -i hosts infrastructure-playbooks/purge-container-cluster.yml
13 |
14 | .. note::
15 | These playbooks aren't intended to be run with the ``--limit`` option.
--------------------------------------------------------------------------------
/docs/source/day-2/upgrade.rst:
--------------------------------------------------------------------------------
1 | Upgrading the ceph cluster
2 | --------------------------
3 |
4 | ceph-ansible provides a playbook in ``infrastructure-playbooks`` for upgrading a Ceph cluster: ``rolling_update.yml``.
5 |
6 | This playbook could be used for both minor upgrades (X.Y to X.Z) or major upgrades (X to Y).
7 |
8 | Before running a major upgrade you need to update the ceph-ansible version first.
9 |
10 | example:
11 |
12 | .. code-block:: shell
13 |
14 | $ ansible-playbook -vv -i hosts infrastructure-playbooks/rolling_update.yml
15 |
16 | .. note::
17 | This playbook isn't intended to be run with the ``--limit`` ansible option.
18 |
--------------------------------------------------------------------------------
/docs/source/glossary.rst:
--------------------------------------------------------------------------------
1 | Glossary
2 | ========
3 |
4 | .. toctree::
5 | :maxdepth: 3
6 | :caption: Contents:
7 |
8 | index
9 | testing/glossary
10 |
--------------------------------------------------------------------------------
/docs/source/installation/methods.rst:
--------------------------------------------------------------------------------
1 | Installation methods
2 | ====================
3 |
4 | ceph-ansible can deploy Ceph either in a non-containerized context (via packages) or in a containerized context using ceph-container images.
5 |
6 | .. toctree::
7 | :maxdepth: 1
8 |
9 | non-containerized
10 | containerized
11 |
12 | The difference here is that you don't have the rbd command on the host when using the containerized deployment so everything related to ceph needs to be executed within a container. So in the case there is software like e.g. Open Nebula which requires that the rbd command is accessible directly on the host (non-containerized) then you have to install the rbd command by yourself on those servers outside of containers (or make sure that this software somehow runs within containers as well and that it can access rbd).
13 |
--------------------------------------------------------------------------------
/docs/source/testing/development.rst:
--------------------------------------------------------------------------------
1 | .. _development:
2 |
3 | ceph-ansible testing for development
4 | ====================================
5 |
--------------------------------------------------------------------------------
/docs/source/testing/glossary.rst:
--------------------------------------------------------------------------------
1 | Glossary
2 | ========
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | index
8 | running.rst
9 | development.rst
10 | scenarios.rst
11 | modifying.rst
12 | layout.rst
13 | tests.rst
14 | tox.rst
15 |
--------------------------------------------------------------------------------
/docs/source/testing/modifying.rst:
--------------------------------------------------------------------------------
1 | .. _modifying:
2 |
3 | Modifying (or adding) tests
4 | ===========================
5 |
--------------------------------------------------------------------------------
/docs/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = docs
3 | skipsdist = True
4 |
5 | [testenv:docs]
6 | basepython=python
7 | changedir=source
8 | deps=sphinx==1.7.9
9 | commands=
10 | sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html
11 |
--------------------------------------------------------------------------------
/dummy-ansible-hosts:
--------------------------------------------------------------------------------
1 | # Dummy ansible host file
2 | # Used for syntax check by Travis
3 | # Before committing code please run: ansible-playbook --syntax-check site.yml -i dummy-ansible-hosts
4 | localhost
5 |
--------------------------------------------------------------------------------
/group_vars/exporters.yml.sample:
--------------------------------------------------------------------------------
1 | ---
2 | # Variables here are applicable to all host groups NOT roles
3 |
4 | # This sample file generated by generate_group_vars_sample.sh
5 |
6 | # Dummy variable to avoid error because ansible does not recognize the
7 | # file as a good configuration file when no variable in it.
8 | dummy:
9 |
10 | ###########
11 | # GENERAL #
12 | ###########
13 |
14 | #ceph_exporter_addr: "0.0.0.0"
15 | #ceph_exporter_port: 9926
16 | #ceph_exporter_stats_period: 5 # seconds
17 | #ceph_exporter_prio_limit: 5
18 |
19 | ##########
20 | # DOCKER #
21 | ##########
22 |
23 | # If you want to add parameters, you should retain the existing ones and include the new ones.
24 | #ceph_exporter_container_params:
25 | # args:
26 | # - -f
27 | # - -n=client.ceph-exporter
28 | # - --sock-dir=/var/run/ceph
29 | # - --addrs={{ ceph_exporter_addr }}
30 | # - --port={{ ceph_exporter_port }}
31 | # - --stats-period={{ ceph_exporter_stats_period }}
32 | # - --prio-limit={{ ceph_exporter_prio_limit }}
33 |
34 |
--------------------------------------------------------------------------------
/group_vars/rgwloadbalancers.yml.sample:
--------------------------------------------------------------------------------
1 | ---
2 | # Variables here are applicable to all host groups NOT roles
3 |
4 | # This sample file generated by generate_group_vars_sample.sh
5 |
6 | # Dummy variable to avoid error because ansible does not recognize the
7 | # file as a good configuration file when no variable in it.
8 | dummy:
9 |
10 | # You can override vars by using host or group vars
11 |
12 | ###########
13 | # GENERAL #
14 | ###########
15 |
16 | #haproxy_frontend_port: 80
17 | #haproxy_frontend_ssl_port: 443
18 | #haproxy_frontend_ssl_certificate:
19 | #haproxy_ssl_dh_param: 4096
20 | #haproxy_ssl_ciphers:
21 | # - EECDH+AESGCM
22 | # - EDH+AESGCM
23 | #haproxy_ssl_options:
24 | # - no-sslv3
25 | # - no-tlsv10
26 | # - no-tlsv11
27 | # - no-tls-tickets
28 | #
29 | # virtual_ips:
30 | # - 192.168.238.250
31 | # - 192.168.238.251
32 | #
33 | # virtual_ip_netmask: 24
34 | # virtual_ip_interface: ens33
35 |
36 |
--------------------------------------------------------------------------------
/infrastructure-playbooks/README.md:
--------------------------------------------------------------------------------
1 | Infrastructure playbooks
2 | ========================
3 |
4 | This directory contains a variety of playbooks that can be used independently of the Ceph roles we have.
5 | They aim to perform infrastructure related tasks that would help use managing a Ceph cluster or performing certain operational tasks.
6 |
7 | To use them, run `ansible-playbook infrastructure-playbooks/`.
8 |
--------------------------------------------------------------------------------
/infrastructure-playbooks/gather-ceph-logs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Gather ceph logs
3 | hosts:
4 | - mons
5 | - osds
6 | - mdss
7 | - rgws
8 | - nfss
9 | - rbdmirrors
10 | - clients
11 | - mgrs
12 |
13 | gather_facts: false
14 | become: true
15 |
16 | tasks:
17 | - name: Create a temp directory
18 | ansible.builtin.tempfile:
19 | state: directory
20 | prefix: ceph_ansible
21 | run_once: true
22 | register: localtempfile
23 | become: false
24 | delegate_to: localhost
25 |
26 | - name: Set_fact lookup_ceph_config - lookup keys, conf and logs
27 | ansible.builtin.find:
28 | paths:
29 | - /etc/ceph
30 | - /var/log/ceph
31 | register: ceph_collect
32 |
33 | - name: Collect ceph logs, config and keys on the machine running ansible
34 | ansible.builtin.fetch:
35 | src: "{{ item.path }}"
36 | dest: "{{ localtempfile.path }}"
37 | fail_on_missing: false
38 | flat: false
39 | with_items: "{{ ceph_collect.files }}"
40 |
--------------------------------------------------------------------------------
/infrastructure-playbooks/purge-container-cluster.yml:
--------------------------------------------------------------------------------
1 | purge-cluster.yml
--------------------------------------------------------------------------------
/infrastructure-playbooks/untested-by-ci/purge-multisite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Nukes a multisite config
3 | - hosts: rgws
4 | become: true
5 | tasks:
6 | - include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml
7 |
8 | handlers:
9 | # Ansible 2.1.0 bug will ignore included handlers without this
10 | - name: Import_tasks roles/ceph-rgw/handlers/main.yml
11 | import_tasks: roles/ceph-rgw/handlers/main.yml
12 |
--------------------------------------------------------------------------------
/library/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/library/__init__.py
--------------------------------------------------------------------------------
/module_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/module_utils/__init__.py
--------------------------------------------------------------------------------
/plugins/filter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/plugins/filter/__init__.py
--------------------------------------------------------------------------------
/plugins/filter/dict2dict.py:
--------------------------------------------------------------------------------
1 | from __future__ import (absolute_import, division, print_function)
2 | __metaclass__ = type
3 |
4 |
5 | class FilterModule(object):
6 | ''' Loop over nested dictionaries '''
7 |
8 | def dict2dict(self, nested_dict):
9 | items = []
10 | for key, value in nested_dict.items():
11 | for k, v in value.items():
12 | items.append(
13 | (
14 | {'key': key, 'value': value},
15 | {'key': k, 'value': v},
16 | ),
17 | )
18 | return items
19 |
20 | def filters(self):
21 | return {
22 | 'dict2dict': self.dict2dict
23 | }
24 |
--------------------------------------------------------------------------------
/profiles/rgw-radosgw-static-website:
--------------------------------------------------------------------------------
1 | ---
2 | # THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLE FOR A PARTICULAR PURPOSE
3 | # GOAL: CONFIGURE RADOS GATEWAY WITH STATIC WEBSITE
4 | #
5 | # The following variables should be added in your group_vars/rgws.yml file
6 | # The double quotes are important, do NOT remove them.
7 |
8 | ceph_conf_overrides:
9 | "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
10 | rgw enable static website = true
11 | rgw dns s3website name = objects-website-region.domain.com
12 |
--------------------------------------------------------------------------------
/profiles/rgw-usage-log:
--------------------------------------------------------------------------------
1 | ---
2 | # THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLE FOR A PARTICULAR PURPOSE
3 | # GOAL: CONFIGURE RADOS GATEWAY WITH USAGE LOG
4 | #
5 | # The following variables should be added in your group_vars/rgws.yml file
6 | # The double quotes are important, do NOT remove them.
7 |
8 | ceph_conf_overrides:
9 | "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
10 | rgw enable usage log = true
11 | rgw usage log tick interval = 30
12 | rgw usage log flush threshold = 1024
13 | rgw usage max shards = 32
14 | rgw usage max user shards = 1
15 |
16 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # These are Python requirements needed to run ceph-ansible main
2 | ansible-core>=2.15,<2.17,!=2.9.10
3 | netaddr
4 | six
5 |
--------------------------------------------------------------------------------
/requirements.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # These are Ansible requirements needed to run ceph-ansible main
3 | collections:
4 | - name: https://opendev.org/openstack/ansible-config_template
5 | version: 1.2.1
6 | type: git
7 | - name: ansible.utils
8 | version: '>=2.5.0'
9 | - name: community.general
10 | - name: ansible.posix
11 |
--------------------------------------------------------------------------------
/roles/ceph-client/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-client
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-client/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs A Ceph Client
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-client/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include pre_requisite.yml
3 | ansible.builtin.include_tasks: pre_requisite.yml
4 | when: groups.get(mon_group_name, []) | length > 0
5 |
6 | - name: Include create_users_keys.yml
7 | ansible.builtin.include_tasks: create_users_keys.yml
8 | when:
9 | - user_config | bool
10 | - not rolling_update | default(False) | bool
11 |
--------------------------------------------------------------------------------
/roles/ceph-common/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-common
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/ceph-common/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/create_rbd_client_dir.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create rbd client directory
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: directory
6 | owner: "{{ rbd_client_directory_owner }}"
7 | group: "{{ rbd_client_directory_group }}"
8 | mode: "{{ rbd_client_directory_mode }}"
9 | with_items:
10 | - "{{ rbd_client_admin_socket_path }}"
11 | - "{{ rbd_client_log_path }}"
12 | when: rbd_client_directories | bool
13 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include debian_community_repository.yml
3 | ansible.builtin.include_tasks: debian_community_repository.yml
4 | when: ceph_repository == 'community'
5 |
6 | - name: Include debian_dev_repository.yml
7 | ansible.builtin.include_tasks: debian_dev_repository.yml
8 | when: ceph_repository == 'dev'
9 |
10 | - name: Include debian_custom_repository.yml
11 | ansible.builtin.include_tasks: debian_custom_repository.yml
12 | when: ceph_repository == 'custom'
13 |
14 | - name: Include debian_uca_repository.yml
15 | ansible.builtin.include_tasks: debian_uca_repository.yml
16 | when: ceph_repository == 'uca'
17 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include suse_obs_repository.yml
3 | ansible.builtin.include_tasks: suse_obs_repository.yml
4 | when: ceph_repository == 'obs'
5 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/debian_community_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install dependencies for apt modules
3 | ansible.builtin.package:
4 | name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
5 | update_cache: true
6 | register: result
7 | until: result is succeeded
8 |
9 | - name: Configure debian ceph community repository stable key
10 | ansible.builtin.apt_key:
11 | data: "{{ lookup('file', role_path + '/files/cephstable.asc') }}"
12 | state: present
13 | register: result
14 | until: result is succeeded
15 |
16 | - name: Configure debian ceph stable community repository
17 | ansible.builtin.apt_repository:
18 | repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
19 | state: present
20 | update_cache: true
21 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/debian_custom_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configure debian custom apt key
3 | ansible.builtin.apt_key:
4 | url: "{{ ceph_custom_key }}"
5 | state: present
6 | register: result
7 | until: result is succeeded
8 | when: ceph_custom_key is defined
9 |
10 | - name: Configure debian custom repository
11 | ansible.builtin.apt_repository:
12 | repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
13 | state: present
14 | update_cache: true
15 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/debian_dev_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Fetch ceph debian development repository
3 | ansible.builtin.uri:
4 | url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo?arch={{ ansible_facts['architecture'] }}"
5 | return_content: true
6 | register: ceph_dev_deb_repo
7 |
8 | - name: Configure ceph debian development repository
9 | ansible.builtin.apt_repository:
10 | repo: "{{ ceph_dev_deb_repo.content }}"
11 | state: present
12 | update_cache: true
13 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/debian_uca_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add ubuntu cloud archive key package
3 | ansible.builtin.package:
4 | name: ubuntu-cloud-keyring
5 | register: result
6 | until: result is succeeded
7 |
8 | - name: Add ubuntu cloud archive repository
9 | ansible.builtin.apt_repository:
10 | repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main"
11 | state: present
12 | update_cache: true
13 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_debian_packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install ceph for debian
3 | ansible.builtin.apt:
4 | name: "{{ debian_ceph_pkgs | unique }}"
5 | update_cache: false
6 | state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
7 | default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
8 | register: result
9 | until: result is succeeded
10 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_on_clear.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install ceph bundle
3 | community.general.swupd:
4 | name: storage-cluster
5 | state: present
6 | register: result
7 | until: result is succeeded
8 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_on_debian.yml:
--------------------------------------------------------------------------------
1 | - name: Install dependencies
2 | ansible.builtin.apt:
3 | name: "{{ debian_package_dependencies }}"
4 | state: present
5 | update_cache: true
6 | cache_valid_time: 3600
7 | register: result
8 | until: result is succeeded
9 |
10 | - name: Include install_debian_packages.yml
11 | ansible.builtin.include_tasks: install_debian_packages.yml
12 | when:
13 | - (ceph_origin == 'repository' or ceph_origin == 'distro')
14 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_redhat_packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install redhat dependencies
3 | ansible.builtin.package:
4 | name: "{{ redhat_package_dependencies }}"
5 | state: present
6 | register: result
7 | until: result is succeeded
8 | when: ansible_facts['distribution'] == 'RedHat'
9 |
10 | - name: Install centos dependencies
11 | ansible.builtin.yum:
12 | name: "{{ centos_package_dependencies }}"
13 | state: present
14 | register: result
15 | until: result is succeeded
16 | when: ansible_facts['distribution'] == 'CentOS'
17 |
18 | - name: Install redhat ceph packages
19 | ansible.builtin.package:
20 | name: "{{ redhat_ceph_pkgs | unique }}"
21 | state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
22 | register: result
23 | until: result is succeeded
24 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_suse_packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install SUSE/openSUSE dependencies
3 | ansible.builtin.package:
4 | name: "{{ suse_package_dependencies }}"
5 | state: present
6 | register: result
7 | until: result is succeeded
8 |
9 | - name: Install SUSE/openSUSE ceph packages
10 | ansible.builtin.package:
11 | name: "{{ suse_ceph_pkgs | unique }}"
12 | state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
13 | register: result
14 | until: result is succeeded
15 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/redhat_custom_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configure red hat custom rpm key
3 | ansible.builtin.rpm_key:
4 | key: "{{ ceph_custom_key }}"
5 | state: present
6 | register: result
7 | until: result is succeeded
8 | when: ceph_custom_key is defined
9 |
10 | - name: Configure red hat custom repository
11 | ansible.builtin.get_url:
12 | url: "{{ ceph_custom_repo }}"
13 | dest: /etc/yum.repos.d
14 | owner: root
15 | group: root
16 | mode: "0644"
17 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/redhat_dev_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Fetch ceph red hat development repository
3 | ansible.builtin.uri:
4 | # Use the centos repo since we don't currently have a dedicated red hat repo
5 | url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/centos/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}"
6 | return_content: true
7 | register: ceph_dev_yum_repo
8 |
9 | - name: Configure ceph red hat development repository
10 | ansible.builtin.copy:
11 | content: "{{ ceph_dev_yum_repo.content }}"
12 | dest: /etc/yum.repos.d/ceph-dev.repo
13 | owner: root
14 | group: root
15 | mode: "0644"
16 | backup: true
17 |
18 | - name: Remove ceph_stable repositories
19 | ansible.builtin.yum_repository:
20 | name: '{{ item }}'
21 | file: ceph_stable
22 | state: absent
23 | with_items:
24 | - ceph_stable
25 | - ceph_stable_noarch
26 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/suse_obs_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configure openSUSE ceph OBS repository
3 | community.general.zypper_repository:
4 | name: "OBS:filesystems:ceph:{{ ceph_release }}"
5 | state: present
6 | repo: "{{ ceph_obs_repo }}"
7 | auto_import_keys: true
8 | autorefresh: true
9 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/selinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: If selinux is not disabled
3 | when: ansible_facts['selinux']['status'] == 'enabled'
4 | block:
5 | - name: Install policycoreutils-python
6 | ansible.builtin.package:
7 | name: policycoreutils-python
8 | state: present
9 | register: result
10 | until: result is succeeded
11 | when: ansible_facts['distribution_major_version'] == '7'
12 |
13 | - name: Install python3-policycoreutils on RHEL 8
14 | ansible.builtin.package:
15 | name: python3-policycoreutils
16 | state: present
17 | register: result
18 | until: result is succeeded
19 | when:
20 | - inventory_hostname in groups.get(nfs_group_name, [])
21 | or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
22 | - ansible_facts['distribution_major_version'] == '8'
23 |
--------------------------------------------------------------------------------
/roles/ceph-config/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-config
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-config/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Guillaume Abrioux
5 | description: Handles ceph-ansible initial configuration
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-config/tasks/create_ceph_initial_dirs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create ceph initial directories
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: directory
6 | owner: "{{ ceph_uid }}"
7 | group: "{{ ceph_uid }}"
8 | mode: "0755"
9 | loop:
10 | - /etc/ceph
11 | - /var/lib/ceph/
12 | - /var/lib/ceph/mon
13 | - /var/lib/ceph/osd
14 | - /var/lib/ceph/mds
15 | - /var/lib/ceph/tmp
16 | - /var/lib/ceph/crash
17 | - /var/lib/ceph/radosgw
18 | - /var/lib/ceph/bootstrap-rgw
19 | - /var/lib/ceph/bootstrap-mgr
20 | - /var/lib/ceph/bootstrap-mds
21 | - /var/lib/ceph/bootstrap-osd
22 | - /var/lib/ceph/bootstrap-rbd
23 | - /var/lib/ceph/bootstrap-rbd-mirror
24 | - /var/run/ceph
25 | - /var/log/ceph
26 |
--------------------------------------------------------------------------------
/roles/ceph-config/tasks/rgw_systemd_environment_file.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create rados gateway instance directories
3 | ansible.builtin.file:
4 | path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
5 | state: directory
6 | owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
7 | group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
8 | mode: "{{ ceph_directories_mode | default('0755') }}"
9 | with_items: "{{ rgw_instances }}"
10 |
11 | - name: Generate environment file
12 | ansible.builtin.copy:
13 | dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
14 | owner: "root"
15 | group: "root"
16 | mode: "0644"
17 | content: |
18 | INST_NAME={{ item.instance_name }}
19 | with_items: "{{ rgw_instances }}"
20 | when:
21 | - containerized_deployment | bool
22 | - rgw_instances is defined
23 |
--------------------------------------------------------------------------------
/roles/ceph-container-common/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-container-common
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-container-common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/ceph-container-common/files/ceph.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=ceph target allowing to start/stop all ceph*@.service instances at once
3 |
4 | [Install]
5 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/roles/ceph-container-common/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-container-common/tasks/registry.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Container registry authentication
3 | ansible.builtin.command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}'
4 | args:
5 | stdin: '{{ ceph_docker_registry_password }}'
6 | stdin_add_newline: false
7 | changed_when: false
8 | environment:
9 | HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
10 | HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
11 | NO_PROXY: "{{ ceph_docker_no_proxy }}"
12 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-container-engine
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Guillaume Abrioux
5 | description: Handles container installation prerequisites
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: Ubuntu
10 | versions:
11 | - xenial
12 | - name: EL
13 | versions:
14 | - 'all'
15 | galaxy_tags:
16 | - system
17 | dependencies: []
18 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include pre_requisites/prerequisites.yml
3 | ansible.builtin.include_tasks: pre_requisites/prerequisites.yml
4 | when: not is_atomic | bool
5 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/templates/docker-proxy.conf.j2:
--------------------------------------------------------------------------------
1 | [Service]
2 | {% if ceph_docker_http_proxy is defined %}
3 | Environment="HTTP_PROXY={{ ceph_docker_http_proxy }}"
4 | {% endif %}
5 | {% if ceph_docker_https_proxy is defined %}
6 | Environment="HTTPS_PROXY={{ ceph_docker_https_proxy }}"
7 | {% endif %}
8 | Environment="NO_PROXY={{ ceph_docker_no_proxy }}"
9 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/vars/CentOS-8.yml:
--------------------------------------------------------------------------------
1 | RedHat-8.yml
--------------------------------------------------------------------------------
/roles/ceph-container-engine/vars/CentOS-9.yml:
--------------------------------------------------------------------------------
1 | ---
2 | container_package_name: podman
3 | container_service_name: podman
4 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | container_package_name: docker-ce
3 | container_service_name: docker
4 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/vars/RedHat-8.yml:
--------------------------------------------------------------------------------
1 | ---
2 | container_package_name: podman
3 | container_service_name: podman
4 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/vars/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | container_package_name: docker
3 | container_service_name: docker
4 |
--------------------------------------------------------------------------------
/roles/ceph-container-engine/vars/Ubuntu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | container_package_name: docker.io
3 | container_service_name: docker
4 |
--------------------------------------------------------------------------------
/roles/ceph-crash/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Guillaume Abrioux
5 | description: Deploy ceph-crash
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-crash/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file for ceph-crash container
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-crash.service.j2"
5 | dest: /etc/systemd/system/ceph-crash@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph crash
10 |
--------------------------------------------------------------------------------
/roles/ceph-dashboard/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Boris Ranto
5 | description: Configures Ceph Dashboard
6 | license: Apache
7 | min_ansible_version: '2.4'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set grafana url
3 | ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
4 | delegate_to: "{{ groups[mon_group_name][0] }}"
5 | run_once: true
6 | changed_when: false
7 |
8 | - name: Inject grafana dashboard layouts
9 | ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
10 | delegate_to: "{{ groups[mon_group_name][0] }}"
11 | run_once: true
12 | changed_when: false
13 | when: containerized_deployment | bool
14 |
--------------------------------------------------------------------------------
/roles/ceph-dashboard/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include configure_dashboard.yml
3 | ansible.builtin.include_tasks: configure_dashboard.yml
4 |
5 | - name: Print dashboard URL
6 | ansible.builtin.debug:
7 | msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
8 | run_once: true
9 |
--------------------------------------------------------------------------------
/roles/ceph-defaults/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-defaults
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-defaults/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Handles ceph-ansible default vars for all roles
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: Ubuntu
10 | versions:
11 | - xenial
12 | - name: EL
13 | versions:
14 | - 'all'
15 | galaxy_tags:
16 | - system
17 | dependencies: []
18 |
--------------------------------------------------------------------------------
/roles/ceph-defaults/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/ceph-defaults/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_osd_pool_default_crush_rule: -1
3 | ceph_osd_pool_default_crush_rule_name: "replicated_rule"
4 |
--------------------------------------------------------------------------------
/roles/ceph-exporter/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###########
3 | # GENERAL #
4 | ###########
5 |
6 | ceph_exporter_addr: "0.0.0.0"
7 | ceph_exporter_port: 9926
8 | ceph_exporter_stats_period: 5 # seconds
9 | ceph_exporter_prio_limit: 5
10 |
11 | ##########
12 | # DOCKER #
13 | ##########
14 |
15 | # If you want to add parameters, you should retain the existing ones and include the new ones.
16 | ceph_exporter_container_params:
17 | args:
18 | - -f
19 | - -n=client.ceph-exporter
20 | - --sock-dir=/var/run/ceph
21 | - --addrs={{ ceph_exporter_addr }}
22 | - --port={{ ceph_exporter_port }}
23 | - --stats-period={{ ceph_exporter_stats_period }}
24 | - --prio-limit={{ ceph_exporter_prio_limit }}
25 |
--------------------------------------------------------------------------------
/roles/ceph-exporter/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Guillaume Abrioux
5 | description: Deploy ceph-exporter
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-exporter/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file for ceph-exporter container
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-exporter.service.j2"
5 | dest: /etc/systemd/system/ceph-exporter@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph exporter
10 |
--------------------------------------------------------------------------------
/roles/ceph-facts/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-facts
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-facts/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Guillaume Abrioux
5 | description: Set some facts for ceph to be deployed
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: Ubuntu
10 | versions:
11 | - xenial
12 | - name: EL
13 | versions:
14 | - 'all'
15 | galaxy_tags:
16 | - system
17 | dependencies: []
18 |
--------------------------------------------------------------------------------
/roles/ceph-facts/tasks/container_binary.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check if podman binary is present
3 | ansible.builtin.stat:
4 | path: /usr/bin/podman
5 | register: podman_binary
6 |
7 | - name: Set_fact container_binary
8 | ansible.builtin.set_fact:
9 | container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] in ['8', '9']) else 'docker' }}"
10 | when: not docker2podman | default(false) | bool
11 |
--------------------------------------------------------------------------------
/roles/ceph-facts/tasks/get_def_crush_rule_name.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get current default crush rule details
3 | ceph_crush_rule_info:
4 | cluster: "{{ cluster }}"
5 | environment:
6 | CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
7 | CEPH_CONTAINER_BINARY: "{{ container_binary }}"
8 | register: default_crush_rule_details
9 | delegate_to: "{{ delegated_node | default(groups[mon_group_name][0]) }}"
10 | run_once: true
11 |
12 | - name: Get current default crush rule name
13 | ansible.builtin.set_fact:
14 | ceph_osd_pool_default_crush_rule_name: "{{ item.rule_name }}"
15 | with_items: "{{ default_crush_rule_details.stdout | default('{}', True) | from_json }}"
16 | run_once: true
17 | when: item.rule_id | int == osd_pool_default_crush_rule | int
18 |
--------------------------------------------------------------------------------
/roles/ceph-facts/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include facts.yml
3 | ansible.builtin.include_tasks: facts.yml
4 |
--------------------------------------------------------------------------------
/roles/ceph-facts/tasks/set_monitor_address.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set_fact _monitor_addresses - ipv4
3 | ansible.builtin.set_fact:
4 | _monitor_addresses: "{{ _monitor_addresses | default({}) | combine({item: hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['public_network'].split(',')) | first}) }}"
5 | with_items: "{{ groups.get(mon_group_name, []) }}"
6 | when:
7 | - ip_version == 'ipv4'
8 |
9 | - name: Set_fact _monitor_addresses - ipv6
10 | ansible.builtin.set_fact:
11 | _monitor_addresses: "{{ _monitor_addresses | default({}) | combine({item: hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['public_network'].split(',')) | last | ansible.utils.ipwrap}) }}"
12 | with_items: "{{ groups.get(mon_group_name, []) }}"
13 | when:
14 | - ip_version == 'ipv6'
15 |
--------------------------------------------------------------------------------
/roles/ceph-fetch-keys/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-fetch
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-fetch-keys/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Variables here are applicable to all host groups NOT roles
3 |
4 | # This sample file generated by generate_group_vars_sample.sh
5 |
6 | # Dummy variable to avoid error because ansible does not recognize the
7 | # file as a good configuration file when no variable in it.
8 | dummy:
9 |
10 | fetch_directory: fetch/
11 |
--------------------------------------------------------------------------------
/roles/ceph-fetch-keys/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Andrew Schoen
5 | description: Fetches ceph keys from monitors.
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-fetch-keys/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Lookup keys in /etc/ceph
3 | ansible.builtin.shell: ls -1 /etc/ceph/*.keyring
4 | changed_when: false
5 | register: ceph_keys
6 |
7 | - name: Create a local fetch directory if it does not exist
8 | ansible.builtin.file:
9 | path: "{{ fetch_directory }}"
10 | state: directory
11 | mode: "0755"
12 | delegate_to: localhost
13 | become: false
14 |
15 | - name: Copy ceph user and bootstrap keys to the ansible server
16 | ansible.builtin.fetch:
17 | src: "{{ item }}"
18 | dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
19 | flat: true
20 | fail_on_missing: false
21 | run_once: true
22 | with_items:
23 | - "{{ ceph_keys.stdout_lines }}"
24 | - "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring"
25 | - "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
26 | - "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
27 | - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
28 | - "/var/lib/ceph/bootstrap-mgr/{{ cluster }}.keyring"
29 |
--------------------------------------------------------------------------------
/roles/ceph-grafana/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Boris Ranto
5 | description: Configures Grafana for Ceph Dashboard
6 | license: Apache
7 | min_ansible_version: "2.4"
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-grafana/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include setup_container.yml
3 | ansible.builtin.include_tasks: setup_container.yml
4 |
5 | - name: Include configure_grafana.yml
6 | ansible.builtin.include_tasks: configure_grafana.yml
7 |
--------------------------------------------------------------------------------
/roles/ceph-grafana/tasks/setup_container.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create /etc/grafana and /var/lib/grafana
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: directory
6 | owner: "{{ grafana_uid }}"
7 | group: "{{ grafana_uid }}"
8 | recurse: true
9 | with_items:
10 | - /etc/grafana
11 | - /var/lib/grafana
12 |
13 | - name: Include_tasks systemd.yml
14 | ansible.builtin.include_tasks: systemd.yml
15 |
16 | - name: Start the grafana-server service
17 | ansible.builtin.systemd:
18 | name: grafana-server
19 | state: started
20 | enabled: true
21 | daemon_reload: true
22 | failed_when: false
23 |
--------------------------------------------------------------------------------
/roles/ceph-grafana/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ship systemd service
3 | ansible.builtin.template:
4 | src: grafana-server.service.j2
5 | dest: "/etc/systemd/system/grafana-server.service"
6 | owner: root
7 | group: root
8 | mode: "0644"
9 |
--------------------------------------------------------------------------------
/roles/ceph-grafana/templates/dashboards-ceph-dashboard.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 | - name: 'Ceph Dashboard'
5 | orgId: 1
6 | folder: 'ceph-dashboard'
7 | type: file
8 | disableDeletion: false
9 | updateIntervalSeconds: 3
10 | editable: false
11 | options:
12 | path: '{{ grafana_dashboards_path }}'
13 |
--------------------------------------------------------------------------------
/roles/ceph-grafana/templates/datasources-ceph-dashboard.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | # list of datasources that should be deleted from the database
4 | deleteDatasources:
5 | - name: '{{ grafana_datasource }}'
6 | orgId: 1
7 |
8 | # list of datasources to insert/update depending
9 | # what's available in the database
10 | datasources:
11 | # name of the datasource. Required
12 | - name: '{{ grafana_datasource }}'
13 | # datasource type. Required
14 | type: 'prometheus'
15 | # access mode. proxy or direct (Server or Browser in the UI). Required
16 | access: 'proxy'
17 | # org id. will default to orgId 1 if not specified
18 | orgId: 1
19 | # url
20 | url: 'http://{{ grafana_server_addr }}:{{ prometheus_port }}'
21 | # enable/disable basic auth
22 | basicAuth: false
23 | # mark as default datasource. Max one per org
24 | isDefault: true
25 | # allow users to edit datasources from the UI.
26 | editable: false
27 |
--------------------------------------------------------------------------------
/roles/ceph-grafana/templates/grafana.ini.j2:
--------------------------------------------------------------------------------
1 | # [server]
2 | # root_url = %(protocol)s://%(domain)s:%(http_port)s/api/grafana/proxy
3 |
4 | [users]
5 | default_theme = light
6 |
7 | #################################### Anonymous Auth ##########################
8 | [auth.anonymous]
9 | # enable anonymous access
10 | enabled = true
11 |
12 | # specify organization name that should be used for unauthenticated users
13 | org_name = Main Org.
14 |
15 | # specify role for unauthenticated users
16 | org_role = Viewer
17 |
18 | [server]
19 | cert_file = /etc/grafana/ceph-dashboard.crt
20 | cert_key = /etc/grafana/ceph-dashboard.key
21 | domain = {{ ansible_facts['fqdn'] }}
22 | protocol = {{ dashboard_protocol }}
23 | http_port = {{ grafana_port }}
24 | http_addr = {{ grafana_server_addr }}
25 |
26 | [security]
27 | admin_user = {{ grafana_admin_user }}
28 | admin_password = {{ grafana_admin_password }}
29 | allow_embedding = {{ grafana_allow_embedding }}
30 | {% if dashboard_protocol == 'https' %}
31 | cookie_secure = true
32 |
33 | [session]
34 | cookie_secure = true
35 | {% endif %}
--------------------------------------------------------------------------------
/roles/ceph-handler/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-handler
2 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
3 |
--------------------------------------------------------------------------------
/roles/ceph-handler/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Contains handlers for Ceph services
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-handler/tasks/check_running_cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include check_running_containers.yml
3 | ansible.builtin.include_tasks: check_running_containers.yml
4 | when: containerized_deployment | bool
5 |
6 | - name: Include check_socket_non_container.yml
7 | ansible.builtin.include_tasks: check_socket_non_container.yml
8 | when: not containerized_deployment | bool
9 |
--------------------------------------------------------------------------------
/roles/ceph-handler/tasks/handler_crash.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set _crash_handler_called before restart
3 | ansible.builtin.set_fact:
4 | _crash_handler_called: true
5 |
6 | - name: Restart the ceph-crash service # noqa: ignore-errors
7 | ansible.builtin.systemd:
8 | name: ceph-crash@{{ ansible_facts['hostname'] }}
9 | state: restarted
10 | enabled: true
11 | masked: false
12 | daemon_reload: true
13 | ignore_errors: true
14 | when: hostvars[inventory_hostname]['_crash_handler_called'] | default(False) | bool
15 |
16 | - name: Set _crash_handler_called after restart
17 | ansible.builtin.set_fact:
18 | _crash_handler_called: false
19 |
--------------------------------------------------------------------------------
/roles/ceph-handler/tasks/handler_exporter.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set _exporter_handler_called before restart
3 | ansible.builtin.set_fact:
4 | _exporter_handler_called: true
5 |
6 | - name: Restart the ceph-exporter service # noqa: ignore-errors
7 | ansible.builtin.systemd:
8 | name: ceph-exporter@{{ ansible_facts['hostname'] }}
9 | state: restarted
10 | enabled: true
11 | masked: false
12 | daemon_reload: true
13 | ignore_errors: true
14 | when: hostvars[inventory_hostname]['_exporter_handler_called'] | default(False) | bool
15 |
16 | - name: Set _exporter_handler_called after restart
17 | ansible.builtin.set_fact:
18 | _exporter_handler_called: false
19 |
--------------------------------------------------------------------------------
/roles/ceph-handler/tasks/handler_mdss.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set _mds_handler_called before restart
3 | ansible.builtin.set_fact:
4 | _mds_handler_called: true
5 |
6 | - name: Copy mds restart script
7 | ansible.builtin.template:
8 | src: restart_mds_daemon.sh.j2
9 | dest: "{{ tmpdirpath.path }}/restart_mds_daemon.sh"
10 | owner: root
11 | group: root
12 | mode: "0750"
13 | when: tmpdirpath.path is defined
14 |
15 | - name: Restart ceph mds daemon(s)
16 | ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mds_daemon.sh
17 | when:
18 | - hostvars[item]['handler_mds_status'] | default(False) | bool
19 | - hostvars[item]['_mds_handler_called'] | default(False) | bool
20 | - hostvars[item].tmpdirpath.path is defined
21 | with_items: "{{ groups[mds_group_name] }}"
22 | delegate_to: "{{ item }}"
23 | changed_when: false
24 | run_once: true
25 |
26 | - name: Set _mds_handler_called after restart
27 | ansible.builtin.set_fact:
28 | _mds_handler_called: false
29 |
--------------------------------------------------------------------------------
/roles/ceph-handler/tasks/handler_mgrs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set _mgr_handler_called before restart
3 | ansible.builtin.set_fact:
4 | _mgr_handler_called: true
5 |
6 | - name: Copy mgr restart script
7 | ansible.builtin.template:
8 | src: restart_mgr_daemon.sh.j2
9 | dest: "{{ tmpdirpath.path }}/restart_mgr_daemon.sh"
10 | owner: root
11 | group: root
12 | mode: "0750"
13 | when: tmpdirpath.path is defined
14 |
15 | - name: Restart ceph mgr daemon(s)
16 | ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mgr_daemon.sh
17 | when:
18 | - hostvars[item]['handler_mgr_status'] | default(False) | bool
19 | - hostvars[item]['_mgr_handler_called'] | default(False) | bool
20 | - hostvars[item].tmpdirpath.path is defined
21 | with_items: "{{ groups[mgr_group_name] }}"
22 | delegate_to: "{{ item }}"
23 | changed_when: false
24 | run_once: true
25 |
26 | - name: Set _mgr_handler_called after restart
27 | ansible.builtin.set_fact:
28 | _mgr_handler_called: false
29 |
--------------------------------------------------------------------------------
/roles/ceph-handler/tasks/handler_nfss.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set _nfs_handler_called before restart
3 | ansible.builtin.set_fact:
4 | _nfs_handler_called: true
5 |
6 | - name: Copy nfs restart script
7 | ansible.builtin.template:
8 | src: restart_nfs_daemon.sh.j2
9 | dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh"
10 | owner: root
11 | group: root
12 | mode: "0750"
13 | when: tmpdirpath.path is defined
14 |
15 | - name: Restart ceph nfs daemon(s)
16 | ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh
17 | when:
18 | - hostvars[item]['handler_nfs_status'] | default(False) | bool
19 | - hostvars[item]['_nfs_handler_called'] | default(False) | bool
20 | - hostvars[item].tmpdirpath.path is defined
21 | with_items: "{{ groups[nfs_group_name] }}"
22 | delegate_to: "{{ item }}"
23 | changed_when: false
24 | run_once: true
25 |
26 | - name: Set _nfs_handler_called after restart
27 | ansible.builtin.set_fact:
28 | _nfs_handler_called: false
29 |
--------------------------------------------------------------------------------
/roles/ceph-handler/tasks/handler_rgws.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set _rgw_handler_called before restart
3 | ansible.builtin.set_fact:
4 | _rgw_handler_called: true
5 |
6 | - name: Copy rgw restart script
7 | ansible.builtin.template:
8 | src: restart_rgw_daemon.sh.j2
9 | dest: "{{ tmpdirpath.path }}/restart_rgw_daemon.sh"
10 | owner: root
11 | group: root
12 | mode: "0750"
13 | when: tmpdirpath.path is defined
14 |
15 | - name: Restart ceph rgw daemon(s)
16 | ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rgw_daemon.sh
17 | when:
18 | - hostvars[item]['handler_rgw_status'] | default(False) | bool
19 | - hostvars[item]['_rgw_handler_called'] | default(False) | bool
20 | - hostvars[item].tmpdirpath.path is defined
21 | with_items: "{{ groups[rgw_group_name] }}"
22 | delegate_to: "{{ item }}"
23 | changed_when: false
24 | run_once: true
25 |
26 | - name: Set _rgw_handler_called after restart
27 | ansible.builtin.set_fact:
28 | _rgw_handler_called: false
29 |
--------------------------------------------------------------------------------
/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | RETRIES="{{ handler_health_nfs_check_retries }}"
4 | DELAY="{{ handler_health_nfs_check_delay }}"
5 | NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
6 | PID=/var/run/ganesha/ganesha.pid
7 | {% if containerized_deployment | bool %}
8 | DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
9 | {% endif %}
10 |
11 | # First, restart the daemon
12 | {% if containerized_deployment | bool -%}
13 | systemctl restart $NFS_NAME
14 | # Wait and ensure the pid exists after restarting the daemon
15 | while [ $RETRIES -ne 0 ]; do
16 | $DOCKER_EXEC test -f $PID && exit 0
17 | sleep $DELAY
18 | let RETRIES=RETRIES-1
19 | done
20 | # If we reach this point, it means the pid is not present.
21 | echo "PID file ${PID} could not be found, which means Ganesha is not running. Showing $NFS_NAME unit logs now:"
22 | journalctl -u $NFS_NAME
23 | exit 1
24 | {% else %}
25 | systemctl restart nfs-ganesha
26 | {% endif %}
27 |
--------------------------------------------------------------------------------
/roles/ceph-infra/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Disable ntpd
3 | failed_when: false
4 | ansible.builtin.service:
5 | name: '{{ ntp_service_name }}'
6 | state: stopped
7 | enabled: false
8 |
9 | - name: Disable chronyd
10 | failed_when: false
11 | ansible.builtin.service:
12 | name: '{{ chrony_daemon_name }}'
13 | enabled: false
14 | state: stopped
15 |
16 | - name: Disable timesyncd
17 | failed_when: false
18 | ansible.builtin.service:
19 | name: timesyncd
20 | enabled: false
21 | state: stopped
22 |
--------------------------------------------------------------------------------
/roles/ceph-infra/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Guillaume Abrioux
5 | description: Handles ceph infra requirements (ntp, firewall, ...)
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-infra/templates/logrotate.conf.j2:
--------------------------------------------------------------------------------
1 | /var/log/ceph/*.log {
2 | rotate {{ ceph_logrotate_rotate | default(7) }}
3 | {{ ceph_logrotate_frequency | default('daily') }}
4 | compress
5 | sharedscripts
6 | postrotate
7 | killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || pkill -1 -x "ceph-mon|ceph-mgr|ceph-mds|ceph-osd|ceph-fuse|radosgw|rbd-mirror" || true
8 | endscript
9 | missingok
10 | notifempty
11 | su root root
12 | }
13 |
--------------------------------------------------------------------------------
/roles/ceph-mds/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-mds
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-mds/files/ceph-mds.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=ceph target allowing to start/stop all ceph-mds@.service instances at once
3 | PartOf=ceph.target
4 | After=ceph-mon.target
5 | Before=ceph.target
6 | Wants=ceph.target ceph-mon.target
7 |
8 | [Install]
9 | WantedBy=multi-user.target ceph.target
--------------------------------------------------------------------------------
/roles/ceph-mds/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph Metadata
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/containerized.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include_tasks systemd.yml
3 | ansible.builtin.include_tasks: systemd.yml
4 |
5 | - name: Enable ceph-mds.target
6 | ansible.builtin.service:
7 | name: ceph-mds.target
8 | enabled: true
9 | daemon_reload: true
10 | when: containerized_deployment | bool
11 |
12 | - name: Systemd start mds container
13 | ansible.builtin.systemd:
14 | name: ceph-mds@{{ ansible_facts['hostname'] }}
15 | state: started
16 | enabled: true
17 | masked: false
18 | daemon_reload: true
19 |
20 | - name: Wait for mds socket to exist
21 | ansible.builtin.command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
22 | changed_when: false
23 | register: multi_mds_socket
24 | retries: 5
25 | delay: 15
26 | until: multi_mds_socket.rc == 0
27 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include create_mds_filesystems.yml
3 | ansible.builtin.include_tasks: create_mds_filesystems.yml
4 | when:
5 | - inventory_hostname == groups[mds_group_name] | first
6 | - not rolling_update | bool
7 |
8 | - name: Include common.yml
9 | ansible.builtin.include_tasks: common.yml
10 |
11 | - name: Non_containerized.yml
12 | ansible.builtin.include_tasks: non_containerized.yml
13 | when: not containerized_deployment | bool
14 |
15 | - name: Containerized.yml
16 | ansible.builtin.include_tasks: containerized.yml
17 | when: containerized_deployment | bool
18 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-mds.service.j2"
5 | dest: /etc/systemd/system/ceph-mds@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph mdss
10 |
11 | - name: Generate systemd ceph-mds target file
12 | ansible.builtin.copy:
13 | src: ceph-mds.target
14 | dest: /etc/systemd/system/ceph-mds.target
15 | mode: "0644"
16 | when: containerized_deployment | bool
17 |
--------------------------------------------------------------------------------
/roles/ceph-mds/templates/ceph-mds.service.d-overrides.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
--------------------------------------------------------------------------------
/roles/ceph-mgr/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-mgr
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-mgr/files/ceph-mgr.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=ceph target allowing to start/stop all ceph-mgr@.service instances at once
3 | PartOf=ceph.target
4 | After=ceph-mon.target
5 | Before=ceph.target
6 | Wants=ceph.target ceph-mon.target
7 |
8 | [Install]
9 | WantedBy=multi-user.target ceph.target
--------------------------------------------------------------------------------
/roles/ceph-mgr/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph Manager
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-mgr/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-mgr.service.j2"
5 | dest: /etc/systemd/system/ceph-mgr@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph mgrs
10 |
11 | - name: Generate systemd ceph-mgr target file
12 | ansible.builtin.copy:
13 | src: ceph-mgr.target
14 | dest: /etc/systemd/system/ceph-mgr.target
15 | mode: "0644"
16 | when: containerized_deployment | bool
17 |
--------------------------------------------------------------------------------
/roles/ceph-mgr/templates/ceph-mgr.service.d-overrides.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
--------------------------------------------------------------------------------
/roles/ceph-mon/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-mon
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-mon/files/ceph-mon.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=ceph target allowing to start/stop all ceph-mon@.service instances at once
3 | PartOf=ceph.target
4 | Before=ceph.target
5 | Wants=ceph.target
6 |
7 | [Install]
8 | WantedBy=multi-user.target ceph.target
--------------------------------------------------------------------------------
/roles/ceph-mon/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph Monitor
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/secure_cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Collect all the pools
3 | ansible.builtin.command: >
4 | {{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
5 | changed_when: false
6 | register: ceph_pools
7 | check_mode: false
8 |
9 | - name: Secure the cluster
10 | ansible.builtin.command: >
11 | {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
12 | changed_when: false
13 | with_nested:
14 | - "{{ ceph_pools.stdout_lines | default([]) }}"
15 | - "{{ secure_cluster_flags }}"
16 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file for mon container
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-mon.service.j2"
5 | dest: /etc/systemd/system/ceph-mon@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph mons
10 |
11 | - name: Generate systemd ceph-mon target file
12 | ansible.builtin.copy:
13 | src: ceph-mon.target
14 | dest: /etc/systemd/system/ceph-mon.target
15 | mode: "0644"
16 | when: containerized_deployment | bool
17 |
18 | - name: Enable ceph-mon.target
19 | ansible.builtin.service:
20 | name: ceph-mon.target
21 | enabled: true
22 | daemon_reload: true
23 | when: containerized_deployment | bool
24 |
--------------------------------------------------------------------------------
/roles/ceph-mon/templates/ceph-mon.service.d-overrides.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
--------------------------------------------------------------------------------
/roles/ceph-nfs/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-nfs
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-nfs/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Daniel Gryniewicz
5 | description: Installs Ceph NFS Gateway
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-nfs/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-nfs.service.j2"
5 | dest: /etc/systemd/system/ceph-nfs@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph nfss
10 |
--------------------------------------------------------------------------------
/roles/ceph-node-exporter/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Boris Ranto
5 | description: Configures Prometheus Node Exporter
6 | license: Apache
7 | min_ansible_version: '2.4'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-node-exporter/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include setup_container.yml
3 | ansible.builtin.include_tasks: setup_container.yml
4 |
--------------------------------------------------------------------------------
/roles/ceph-node-exporter/tasks/setup_container.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include_tasks systemd.yml
3 | ansible.builtin.include_tasks: systemd.yml
4 |
5 | - name: Start the node_exporter service
6 | ansible.builtin.systemd:
7 | name: node_exporter
8 | state: started
9 | enabled: true
10 | daemon_reload: true
11 | failed_when: false
12 |
--------------------------------------------------------------------------------
/roles/ceph-node-exporter/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ship systemd service
3 | ansible.builtin.template:
4 | src: node_exporter.service.j2
5 | dest: "/etc/systemd/system/node_exporter.service"
6 | owner: root
7 | group: root
8 | mode: "0644"
9 |
--------------------------------------------------------------------------------
/roles/ceph-osd/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-osd
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-osd/files/ceph-osd.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=ceph target allowing to start/stop all ceph-osd@.service instances at once
3 | PartOf=ceph.target
4 | After=ceph-mon.target
5 | Before=ceph.target
6 | Wants=ceph.target ceph-mon.target
7 |
8 | [Install]
9 | WantedBy=multi-user.target ceph.target
--------------------------------------------------------------------------------
/roles/ceph-osd/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph Object Storage Daemon
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/scenarios/lvm-batch.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Use ceph-volume lvm batch to create osds
4 | ceph_volume:
5 | cluster: "{{ cluster }}"
6 | objectstore: "{{ osd_objectstore }}"
7 | batch_devices: "{{ _devices }}"
8 | dmcrypt: "{{ dmcrypt | default(omit) }}"
9 | crush_device_class: "{{ crush_device_class | default(omit) }}"
10 | osds_per_device: "{{ osds_per_device }}"
11 | block_db_size: "{{ block_db_size }}"
12 | block_db_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}"
13 | wal_devices: "{{ bluestore_wal_devices | unique if bluestore_wal_devices | length > 0 else omit }}"
14 | action: "batch"
15 | environment:
16 | CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
17 | CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
18 | CEPH_CONTAINER_BINARY: "{{ container_binary }}"
19 | PYTHONIOENCODING: utf-8
20 | when: _devices | default([]) | length > 0
21 | tags: prepare_osd
22 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/scenarios/lvm.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Use ceph-volume to create osds
3 | ceph_volume:
4 | cluster: "{{ cluster }}"
5 | objectstore: "{{ osd_objectstore }}"
6 | data: "{{ item.data }}"
7 | data_vg: "{{ item.data_vg | default(omit) }}"
8 | db: "{{ item.db | default(omit) }}"
9 | db_vg: "{{ item.db_vg | default(omit) }}"
10 | wal: "{{ item.wal | default(omit) }}"
11 | wal_vg: "{{ item.wal_vg | default(omit) }}"
12 | crush_device_class: "{{ item.crush_device_class | default(crush_device_class) | default(omit) }}"
13 | dmcrypt: "{{ dmcrypt | default(omit) }}"
14 | action: "{{ 'prepare' if containerized_deployment | bool else 'create' }}"
15 | environment:
16 | CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
17 | CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
18 | CEPH_CONTAINER_BINARY: "{{ container_binary }}"
19 | PYTHONIOENCODING: utf-8
20 | with_items: "{{ lvm_volumes }}"
21 | tags: prepare_osd
22 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-osd.service.j2"
5 | dest: /etc/systemd/system/ceph-osd@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph osds
10 |
11 | - name: Generate systemd ceph-osd target file
12 | ansible.builtin.copy:
13 | src: ceph-osd.target
14 | dest: /etc/systemd/system/ceph-osd.target
15 | mode: "0644"
16 | when: containerized_deployment | bool
17 |
18 | - name: Enable ceph-osd.target
19 | ansible.builtin.service:
20 | name: ceph-osd.target
21 | enabled: true
22 | daemon_reload: true
23 | when: containerized_deployment | bool
24 |
--------------------------------------------------------------------------------
/roles/ceph-osd/templates/ceph-osd.service.d-overrides.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
--------------------------------------------------------------------------------
/roles/ceph-osd/templates/tmpfiles_hugepage.j2:
--------------------------------------------------------------------------------
1 | {{ '# ' + ansible_managed }}
2 |
3 | {{ 'w /sys/kernel/mm/transparent_hugepage/enabled - - - - never' }}
4 |
--------------------------------------------------------------------------------
/roles/ceph-osd/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | container_bin_path: /opt/ceph-container/bin
3 |
--------------------------------------------------------------------------------
/roles/ceph-prometheus/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Service handler
3 | # We use the systemd module here so we can use the daemon_reload feature,
4 | # since we're shipping the .service file ourselves
5 | ansible.builtin.systemd:
6 | name: "{{ item }}"
7 | daemon_reload: true
8 | enabled: true
9 | state: restarted
10 | with_items:
11 | - 'alertmanager'
12 | - 'prometheus'
13 | when: not docker2podman | default(False) | bool
14 |
--------------------------------------------------------------------------------
/roles/ceph-prometheus/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Boris Ranto
5 | description: Configures Prometheus for Ceph Dashboard
6 | license: Apache
7 | min_ansible_version: '2.4'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-prometheus/tasks/setup_container.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include_tasks systemd.yml
3 | ansible.builtin.include_tasks: systemd.yml
4 |
5 | - name: Start prometheus services
6 | ansible.builtin.systemd:
7 | name: "{{ item }}"
8 | daemon_reload: true
9 | enabled: true
10 | state: started
11 | with_items:
12 | - prometheus
13 | - alertmanager
14 |
--------------------------------------------------------------------------------
/roles/ceph-prometheus/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ship systemd services
3 | ansible.builtin.template:
4 | src: "{{ item }}.j2"
5 | dest: "/etc/systemd/system/{{ item }}"
6 | owner: root
7 | group: root
8 | mode: "0644"
9 | with_items:
10 | - 'alertmanager.service'
11 | - 'prometheus.service'
12 | notify: Service handler
13 |
--------------------------------------------------------------------------------
/roles/ceph-prometheus/templates/alertmanager.yml.j2:
--------------------------------------------------------------------------------
1 | global:
2 | resolve_timeout: 5m
3 |
4 | route:
5 | group_by: ['alertname']
6 | group_wait: 10s
7 | group_interval: 10s
8 | repeat_interval: 1h
9 | receiver: 'ceph-dashboard'
10 | receivers:
11 | - name: 'ceph-dashboard'
12 | webhook_configs:
13 | {% for host in groups['mgrs'] | default(groups['mons']) %}
14 | - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_facts']['fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
15 | {% if dashboard_protocol == 'https' and alertmanager_dashboard_api_no_ssl_verify | bool %}
16 | http_config:
17 | tls_config:
18 | insecure_skip_verify: true
19 | {% endif %}
20 | {% endfor %}
21 |
--------------------------------------------------------------------------------
/roles/ceph-rbd-mirror/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-rbd
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-rbd-mirror/files/ceph-rbd-mirror.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=ceph target allowing to start/stop all ceph-rbd-mirror@.service instances at once
3 | PartOf=ceph.target
4 | Before=ceph.target
5 |
6 | [Install]
7 | WantedBy=multi-user.target ceph.target
--------------------------------------------------------------------------------
/roles/ceph-rbd-mirror/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph Mirror Agent
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Use systemd to manage container on Atomic host
3 | - name: Include_tasks systemd.yml
4 | ansible.builtin.include_tasks: systemd.yml
5 |
6 | - name: Systemd start rbd mirror container
7 | ansible.builtin.systemd:
8 | name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}
9 | state: started
10 | enabled: true
11 | masked: false
12 | daemon_reload: true
13 |
--------------------------------------------------------------------------------
/roles/ceph-rbd-mirror/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
5 | dest: /etc/systemd/system/ceph-rbd-mirror@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph rbdmirrors
10 |
11 | - name: Generate systemd ceph-rbd-mirror target file
12 | ansible.builtin.copy:
13 | src: ceph-rbd-mirror.target
14 | dest: /etc/systemd/system/ceph-rbd-mirror.target
15 | mode: "0644"
16 | when: containerized_deployment | bool
17 |
18 | - name: Enable ceph-rbd-mirror.target
19 | ansible.builtin.service:
20 | name: ceph-rbd-mirror.target
21 | enabled: true
22 | daemon_reload: true
23 | when: containerized_deployment | bool
24 |
--------------------------------------------------------------------------------
/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.d-overrides.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
--------------------------------------------------------------------------------
/roles/ceph-rgw-loadbalancer/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # You can override vars by using host or group vars
3 |
4 | ###########
5 | # GENERAL #
6 | ###########
7 |
8 | haproxy_frontend_port: 80
9 | haproxy_frontend_ssl_port: 443
10 | haproxy_frontend_ssl_certificate:
11 | haproxy_ssl_dh_param: 4096
12 | haproxy_ssl_ciphers:
13 | - EECDH+AESGCM
14 | - EDH+AESGCM
15 | haproxy_ssl_options:
16 | - no-sslv3
17 | - no-tlsv10
18 | - no-tlsv11
19 | - no-tls-tickets
20 | #
21 | # virtual_ips:
22 | # - 192.168.238.250
23 | # - 192.168.238.251
24 | #
25 | # virtual_ip_netmask: 24
26 | # virtual_ip_interface: ens33
27 |
--------------------------------------------------------------------------------
/roles/ceph-rgw-loadbalancer/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart haproxy
3 | ansible.builtin.service:
4 | name: haproxy
5 | state: restarted
6 |
7 | - name: Restart keepalived
8 | ansible.builtin.service:
9 | name: keepalived
10 | state: restarted
11 |
--------------------------------------------------------------------------------
/roles/ceph-rgw-loadbalancer/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Gui Hecheng
4 | description: Config HAProxy & Keepalived
5 | license: Apache
6 | min_ansible_version: '2.8'
7 | platforms:
8 | - name: EL
9 | versions:
10 | - 'all'
11 | galaxy_tags:
12 | - system
13 | dependencies: []
14 |
--------------------------------------------------------------------------------
/roles/ceph-rgw-loadbalancer/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include_tasks pre_requisite.yml
3 | ansible.builtin.include_tasks: pre_requisite.yml
4 |
5 | - name: Include_tasks start_rgw_loadbalancer.yml
6 | ansible.builtin.include_tasks: start_rgw_loadbalancer.yml
7 |
--------------------------------------------------------------------------------
/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Start haproxy
3 | ansible.builtin.service:
4 | name: haproxy
5 | state: started
6 | enabled: true
7 |
8 | - name: Start keepalived
9 | ansible.builtin.service:
10 | name: keepalived
11 | state: started
12 | enabled: true
13 |
--------------------------------------------------------------------------------
/roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | ! Configuration File for keepalived
3 |
4 | global_defs {
5 | router_id CEPH_RGW
6 | }
7 |
8 | vrrp_script check_haproxy {
9 | script "killall -0 haproxy"
10 | weight -20
11 | interval 2
12 | rise 2
13 | fall 2
14 | }
15 |
16 | {% for instance in vrrp_instances %}
17 | vrrp_instance {{ instance['name'] }} {
18 | state {{ 'MASTER' if inventory_hostname == instance['master'] else 'BACKUP' }}
19 | priority {{ '100' if inventory_hostname == instance['master'] else '90' }}
20 | interface {{ virtual_ip_interface }}
21 | virtual_router_id {{ 50 + loop.index }}
22 | advert_int 1
23 | authentication {
24 | auth_type PASS
25 | auth_pass 1234
26 | }
27 | virtual_ipaddress {
28 | {{ instance['vip'] }}/{{ virtual_ip_netmask }} dev {{ virtual_ip_interface }}
29 | }
30 | track_script {
31 | check_haproxy
32 | }
33 | }
34 |
35 | {% endfor %}
36 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: ceph-rgw
2 |
3 | Documentation is available at http://docs.ceph.com/ceph-ansible/.
4 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/files/ceph-radosgw.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=ceph target allowing to start/stop all ceph-radosgw@.service instances at once
3 | PartOf=ceph.target
4 | After=ceph-mon.target
5 | Before=ceph.target
6 | Wants=ceph.target ceph-mon.target
7 |
8 | [Install]
9 | WantedBy=multi-user.target ceph.target
--------------------------------------------------------------------------------
/roles/ceph-rgw/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart rgw
3 | ansible.builtin.service:
4 | name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
5 | state: restarted
6 | with_items: "{{ rgw_instances }}"
7 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Sébastien Han
5 | description: Installs Ceph Rados Gateway
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include common.yml
3 | ansible.builtin.include_tasks: common.yml
4 |
5 | - name: Include_tasks pre_requisite.yml
6 | ansible.builtin.include_tasks: pre_requisite.yml
7 |
8 | - name: Rgw pool creation tasks
9 | ansible.builtin.include_tasks: rgw_create_pools.yml
10 | run_once: true
11 | when: rgw_create_pools is defined
12 |
13 | - name: Include_tasks openstack-keystone.yml
14 | ansible.builtin.include_tasks: openstack-keystone.yml
15 | when: radosgw_keystone_ssl | bool
16 |
17 | - name: Include_tasks start_radosgw.yml
18 | ansible.builtin.include_tasks: start_radosgw.yml
19 | when:
20 | - not containerized_deployment | bool
21 |
22 | - name: Include start_docker_rgw.yml
23 | ansible.builtin.include_tasks: start_docker_rgw.yml
24 | when:
25 | - containerized_deployment | bool
26 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/start_docker_rgw.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include_task systemd.yml
3 | ansible.builtin.include_tasks: systemd.yml
4 |
5 | - name: Systemd start rgw container
6 | ansible.builtin.systemd:
7 | name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
8 | state: started
9 | enabled: true
10 | masked: false
11 | daemon_reload: true
12 | with_items: "{{ rgw_instances }}"
13 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/systemd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate systemd unit file
3 | ansible.builtin.template:
4 | src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
5 | dest: /etc/systemd/system/ceph-radosgw@.service
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | notify: Restart ceph rgws
10 |
11 | - name: Generate systemd ceph-radosgw target file
12 | ansible.builtin.copy:
13 | src: ceph-radosgw.target
14 | dest: /etc/systemd/system/ceph-radosgw.target
15 | mode: "0644"
16 | when: containerized_deployment | bool
17 |
18 | - name: Enable ceph-radosgw.target
19 | ansible.builtin.service:
20 | name: ceph-radosgw.target
21 | enabled: true
22 | daemon_reload: true
23 | when: containerized_deployment | bool
24 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/templates/ceph-rgw.service.d-overrides.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
--------------------------------------------------------------------------------
/roles/ceph-validate/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | company: Red Hat
4 | author: Andrew Schoen
5 | description: Validates Ceph config options
6 | license: Apache
7 | min_ansible_version: '2.7'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 'all'
12 | galaxy_tags:
13 | - system
14 | dependencies: []
15 |
--------------------------------------------------------------------------------
/roles/ceph-validate/tasks/check_ipaddr_mon.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check if network interface has an IP address in public_network
3 | ansible.builtin.fail:
4 | msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ public_network }}"
5 | when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['public_network'].split(',')) | length == 0
6 |
--------------------------------------------------------------------------------
/roles/ceph-validate/tasks/check_nfs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone)
3 | ansible.builtin.fail:
4 | msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True"
5 | when:
6 | - nfs_obj_gw | bool
7 | - groups.get(mon_group_name, []) | length == 0
8 | - (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined)
9 |
10 | - name: Fail on openSUSE Leap 15.x using distro packages
11 | ansible.builtin.fail:
12 | msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
13 | when:
14 | - ceph_origin == 'distro'
15 | - ansible_facts['distribution'] == 'openSUSE Leap'
16 |
--------------------------------------------------------------------------------
/roles/ceph-validate/tasks/check_pools.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Fail if target_size_ratio is not set when pg_autoscale_mode is True
3 | ansible.builtin.fail:
4 | msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
5 | with_items:
6 | - "{{ cephfs_pools | default([]) }}"
7 | - "{{ pools | default([]) }}"
8 | when:
9 | - item.pg_autoscale_mode | default(False) | bool
10 | - item.target_size_ratio is undefined
11 |
--------------------------------------------------------------------------------
/roles/ceph-validate/tasks/check_rbdmirror.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure ceph_rbd_mirror_pool is set
3 | ansible.builtin.fail:
4 | msg: "ceph_rbd_mirror_pool needs to be provided"
5 | when: ceph_rbd_mirror_pool | default("") | length == 0
6 |
7 | - name: Ensure ceph_rbd_mirror_remote_cluster is set
8 | ansible.builtin.fail:
9 | msg: "ceph_rbd_mirror_remote_cluster needs to be provided"
10 | when:
11 | - ceph_rbd_mirror_remote_cluster | default("") | length == 0
12 | - ceph_rbd_mirror_remote_user | default("") | length > 0
13 |
--------------------------------------------------------------------------------
/roles/ceph-validate/tasks/check_repository.yml:
--------------------------------------------------------------------------------
1 | - name: Validate ceph_origin
2 | ansible.builtin.fail:
3 | msg: "ceph_origin must be either 'repository', 'distro' or 'local'"
4 | when: ceph_origin not in ['repository', 'distro', 'local']
5 |
6 | - name: Validate ceph_repository
7 | ansible.builtin.fail:
8 | msg: "ceph_repository must be either 'community', 'obs', 'dev', 'custom' or 'uca'"
9 | when:
10 | - ceph_origin == 'repository'
11 | - ceph_repository not in ['community', 'obs', 'dev', 'custom', 'uca']
12 |
13 | - name: Validate ceph_repository_community
14 | ansible.builtin.fail:
15 | msg: "ceph_stable_release must be 'squid'"
16 | when:
17 | - ceph_origin == 'repository'
18 | - ceph_repository == 'community'
19 | - ceph_stable_release not in ['squid']
20 |
--------------------------------------------------------------------------------
/roles/ceph-validate/tasks/check_rgw_pools.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Fail if ec_profile is not set for ec pools
3 | ansible.builtin.fail:
4 | msg: "ec_profile must be set for ec pools"
5 | loop: "{{ rgw_create_pools | dict2items }}"
6 | when:
7 | - item.value.type is defined
8 | - item.value.type == 'ec'
9 | - item.value.ec_profile is undefined
10 |
11 | - name: Fail if ec_k is not set for ec pools
12 | ansible.builtin.fail:
13 | msg: "ec_k must be set for ec pools"
14 | loop: "{{ rgw_create_pools | dict2items }}"
15 | when:
16 | - item.value.type is defined
17 | - item.value.type == 'ec'
18 | - item.value.create_profile | default(true)
19 | - item.value.ec_k is undefined
20 |
21 | - name: Fail if ec_m is not set for ec pools
22 | ansible.builtin.fail:
23 | msg: "ec_m must be set for ec pools"
24 | loop: "{{ rgw_create_pools | dict2items }}"
25 | when:
26 | - item.value.type is defined
27 | - item.value.type == 'ec'
28 | - item.value.create_profile | default(true)
29 | - item.value.ec_m is undefined
30 |
--------------------------------------------------------------------------------
/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: true
4 | tasks:
5 | - import_role:
6 | name: ceph-common
7 | - import_role:
8 | name: ceph-mon
9 | - import_role:
10 | name: ceph-osd
11 | - import_role:
12 | name: ceph-mds
13 | - import_role:
14 | name: ceph-rgw
15 | - import_role:
16 | name: ceph-fetch-keys
17 |
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
1 | Functional tests
2 | ================
3 |
4 | These playbooks aim to individually validate each Ceph component.
5 | Some of them require packages to be installed.
6 | Ideally you will run these tests from a client machine or from the Ansible server.
7 |
--------------------------------------------------------------------------------
/tests/functional/.gitignore:
--------------------------------------------------------------------------------
1 | ubuntu-key/
2 | fetch/
3 | vagrant_ssh_config
4 |
--------------------------------------------------------------------------------
/tests/functional/add-mdss/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-mdss/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-mdss/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-mdss/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-mdss/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | docker: True
3 | ceph_origin: repository
4 | ceph_repository: community
5 | containerized_deployment: true
6 | cluster: ceph
7 | public_network: "192.168.63.0/24"
8 | cluster_network: "192.168.64.0/24"
9 | radosgw_interface: eth1
10 | journal_size: 100
11 | osd_objectstore: "bluestore"
12 | copy_admin_key: true
13 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
14 | lvm_volumes:
15 | - data: data-lv1
16 | data_vg: test_group
17 | - data: data-lv2
18 | data_vg: test_group
19 | db: journal1
20 | db_vg: journals
21 | os_tuning_params:
22 | - { name: fs.file-max, value: 26234859 }
23 | ceph_conf_overrides:
24 | global:
25 | mon_allow_pool_size_one: true
26 | mon_warn_on_pool_no_redundancy: false
27 | osd_pool_default_size: 1
28 | dashboard_enabled: False
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/add-mdss/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-mdss/container/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mdss]
8 | mds0
9 |
--------------------------------------------------------------------------------
/tests/functional/add-mdss/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.61.0/24"
6 | cluster_network: "192.168.62.0/24"
7 | radosgw_interface: eth1
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | copy_admin_key: true
11 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
12 | lvm_volumes:
13 | - data: data-lv1
14 | data_vg: test_group
15 | - data: data-lv2
16 | data_vg: test_group
17 | db: journal1
18 | db_vg: journals
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
--------------------------------------------------------------------------------
/tests/functional/add-mdss/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-mdss/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mdss]
8 | mds0
9 |
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | docker: True
3 | ceph_origin: repository
4 | ceph_repository: community
5 | containerized_deployment: true
6 | cluster: ceph
7 | public_network: "192.168.75.0/24"
8 | cluster_network: "192.168.76.0/24"
9 | radosgw_interface: eth1
10 | journal_size: 100
11 | osd_objectstore: "bluestore"
12 | copy_admin_key: true
13 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
14 | lvm_volumes:
15 | - data: data-lv1
16 | data_vg: test_group
17 | - data: data-lv2
18 | data_vg: test_group
19 | db: journal1
20 | db_vg: journals
21 | os_tuning_params:
22 | - { name: fs.file-max, value: 26234859 }
23 | ceph_conf_overrides:
24 | global:
25 | mon_allow_pool_size_one: true
26 | mon_warn_on_pool_no_redundancy: false
27 | osd_pool_default_size: 1
28 | dashboard_enabled: False
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/container/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mgrs]
8 | mgr0
9 |
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.73.0/24"
6 | cluster_network: "192.168.74.0/24"
7 | radosgw_interface: eth1
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | copy_admin_key: true
11 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
12 | lvm_volumes:
13 | - data: data-lv1
14 | data_vg: test_group
15 | - data: data-lv2
16 | data_vg: test_group
17 | db: journal1
18 | db_vg: journals
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
27 |
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-mgrs/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mgrs]
8 | mgr0
9 |
--------------------------------------------------------------------------------
/tests/functional/add-mons/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-mons/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-mons/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-mons/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-mons/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | docker: True
3 | ceph_origin: repository
4 | ceph_repository: community
5 | containerized_deployment: true
6 | cluster: ceph
7 | public_network: "192.168.55.0/24"
8 | cluster_network: "192.168.56.0/24"
9 | radosgw_interface: eth1
10 | journal_size: 100
11 | osd_objectstore: "bluestore"
12 | copy_admin_key: true
13 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
14 | lvm_volumes:
15 | - data: data-lv1
16 | data_vg: test_group
17 | - data: data-lv2
18 | data_vg: test_group
19 | db: journal1
20 | db_vg: journals
21 | os_tuning_params:
22 | - { name: fs.file-max, value: 26234859 }
23 | ceph_conf_overrides:
24 | global:
25 | mon_allow_pool_size_one: true
26 | mon_warn_on_pool_no_redundancy: false
27 | osd_pool_default_size: 1
28 | dashboard_enabled: False
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/add-mons/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-mons/container/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 |
5 | [osds]
6 | osd0
7 |
--------------------------------------------------------------------------------
/tests/functional/add-mons/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.53.0/24"
6 | cluster_network: "192.168.54.0/24"
7 | radosgw_interface: eth1
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | copy_admin_key: true
11 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
12 | lvm_volumes:
13 | - data: data-lv1
14 | data_vg: test_group
15 | - data: data-lv2
16 | data_vg: test_group
17 | db: journal1
18 | db_vg: journals
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
27 |
--------------------------------------------------------------------------------
/tests/functional/add-mons/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-mons/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 |
5 | [osds]
6 | osd0
7 |
--------------------------------------------------------------------------------
/tests/functional/add-osds/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-osds/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-osds/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-osds/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-osds/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | docker: True
3 | ceph_origin: repository
4 | ceph_repository: community
5 | containerized_deployment: true
6 | cluster: ceph
7 | public_network: "192.168.55.0/24"
8 | cluster_network: "192.168.56.0/24"
9 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
10 | journal_size: 100
11 | osd_objectstore: "bluestore"
12 | copy_admin_key: true
13 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
14 | lvm_volumes:
15 | - data: data-lv1
16 | data_vg: test_group
17 | - data: data-lv2
18 | data_vg: test_group
19 | db: journal1
20 | db_vg: journals
21 | os_tuning_params:
22 | - { name: fs.file-max, value: 26234859 }
23 | ceph_conf_overrides:
24 | global:
25 | mon_allow_pool_size_one: true
26 | mon_warn_on_pool_no_redundancy: false
27 | osd_pool_default_size: 1
28 | dashboard_enabled: False
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/add-osds/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-osds/container/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 | osd1
7 |
--------------------------------------------------------------------------------
/tests/functional/add-osds/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.53.0/24"
6 | cluster_network: "192.168.54.0/24"
7 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | copy_admin_key: true
11 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
12 | lvm_volumes:
13 | - data: data-lv1
14 | data_vg: test_group
15 | - data: data-lv2
16 | data_vg: test_group
17 | db: journal1
18 | db_vg: journals
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
--------------------------------------------------------------------------------
/tests/functional/add-osds/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-osds/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 | osd1
7 |
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | docker: True
3 | ceph_origin: repository
4 | ceph_repository: community
5 | containerized_deployment: true
6 | cluster: ceph
7 | public_network: "192.168.67.0/24"
8 | cluster_network: "192.168.68.0/24"
9 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
10 | journal_size: 100
11 | osd_objectstore: "bluestore"
12 | copy_admin_key: true
13 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
14 | lvm_volumes:
15 | - data: data-lv1
16 | data_vg: test_group
17 | - data: data-lv2
18 | data_vg: test_group
19 | db: journal1
20 | db_vg: journals
21 | os_tuning_params:
22 | - { name: fs.file-max, value: 26234859 }
23 | ceph_conf_overrides:
24 | global:
25 | mon_allow_pool_size_one: true
26 | mon_warn_on_pool_no_redundancy: false
27 | osd_pool_default_size: 1
28 | dashboard_enabled: False
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/container/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rbdmirrors]
8 | rbd-mirror0
9 |
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.65.0/24"
6 | cluster_network: "192.168.66.0/24"
7 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | copy_admin_key: true
11 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
12 | lvm_volumes:
13 | - data: data-lv1
14 | data_vg: test_group
15 | - data: data-lv2
16 | data_vg: test_group
17 | db: journal1
18 | db_vg: journals
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-rbdmirrors/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rbdmirrors]
8 | rbd-mirror0
9 |
--------------------------------------------------------------------------------
/tests/functional/add-rgws/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-rgws/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-rgws/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/add-rgws/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/add-rgws/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-rgws/container/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rgws]
8 | rgw0
9 |
--------------------------------------------------------------------------------
/tests/functional/add-rgws/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.69.0/24"
6 | cluster_network: "192.168.70.0/24"
7 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | copy_admin_key: true
11 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
12 | lvm_volumes:
13 | - data: data-lv1
14 | data_vg: test_group
15 | - data: data-lv2
16 | data_vg: test_group
17 | db: journal1
18 | db_vg: journals
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
--------------------------------------------------------------------------------
/tests/functional/add-rgws/group_vars/rgws:
--------------------------------------------------------------------------------
1 | copy_admin_key: true
2 | rgw_create_pools:
3 | foo:
4 | pg_num: 16
5 | type: replicated
6 | bar:
7 | pg_num: 16
8 | rgw_override_bucket_index_max_shards: 16
9 | rgw_bucket_default_quota_max_objects: 1638400
10 |
--------------------------------------------------------------------------------
/tests/functional/add-rgws/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/add-rgws/hosts-2:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rgws]
8 | rgw0
9 |
--------------------------------------------------------------------------------
/tests/functional/all-in-one/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/all-in-one/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/all-in-one/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/all-in-one/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/all-in-one/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | osd0
3 | osd1
4 | osd2
5 |
6 | [mgrs]
7 | osd0
8 | osd1
9 | osd2
10 |
11 | [osds]
12 | osd0
13 | osd1
14 | osd2
15 |
16 | [mdss]
17 | osd0
18 |
19 | [rgws]
20 | osd0
21 |
--------------------------------------------------------------------------------
/tests/functional/all-in-one/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | osd0
3 | osd1
4 | osd2
5 |
6 | [mgrs]
7 | osd0
8 | osd1
9 | osd2
10 |
11 | [osds]
12 | osd0
13 | osd1
14 | osd2
15 |
16 | [mdss]
17 | osd0
18 |
19 | [rgws]
20 | osd0
21 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | user_config: True
3 | copy_admin_key: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | size: 1
8 | application: "rbd"
9 | test2:
10 | name: "test2"
11 | size: 1
12 | application: "rbd"
13 | pools:
14 | - "{{ test }}"
15 | - "{{ test2 }}"
16 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: True
3 | crush_rule_config: True
4 | crush_rule_hdd:
5 | name: HDD
6 | root: default
7 | type: host
8 | class: hdd
9 | default: true
10 | crush_rules:
11 | - "{{ crush_rule_hdd }}"
12 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | lvm_volumes:
3 | - data: data-lv1
4 | data_vg: test_group
5 | - data: data-lv2
6 | data_vg: test_group
7 | db: journal1
8 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | rgw_create_pools:
4 | foo:
5 | pg_num: 16
6 | type: replicated
7 | bar:
8 | pg_num: 16
9 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mgr0
8 |
9 | [osds]
10 | osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
11 | osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
12 | osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
13 |
14 | [mdss]
15 | mds0
16 | mds1
17 | mds2
18 |
19 | [rgws]
20 | rgw0
21 |
22 | #[nfss]
23 | #nfs0
24 |
25 | [clients]
26 | client0
27 | client1
28 |
29 | [rbdmirrors]
30 | rbd-mirror0
31 |
32 | [monitoring]
33 | mon0
34 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | user_config: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | size: 1
8 | test2:
9 | name: "test2"
10 | size: 1
11 | pools:
12 | - "{{ test }}"
13 | - "{{ test2 }}"
14 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: true
3 | crush_rule_config: true
4 | crush_rule_hdd:
5 | name: HDD
6 | root: default
7 | type: host
8 | class: hdd
9 | default: true
10 | crush_rules:
11 | - "{{ crush_rule_hdd }}"
12 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/group_vars/nfss:
--------------------------------------------------------------------------------
1 | copy_admin_key: true
2 | nfs_file_gw: false
3 | nfs_obj_gw: true
4 | ganesha_conf_overrides: |
5 | CACHEINODE {
6 | Entries_HWMark = 100000;
7 | }
8 | nfs_ganesha_stable: false
9 | nfs_ganesha_dev: true
10 | nfs_ganesha_flavor: "ceph_main"
11 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | lvm_volumes:
5 | - data: data-lv1
6 | data_vg: test_group
7 | - data: data-lv2
8 | data_vg: test_group
9 | db: journal1
10 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/all_daemons/group_vars/rgws:
--------------------------------------------------------------------------------
1 | copy_admin_key: true
2 | rgw_create_pools:
3 | foo:
4 | pg_num: 16
5 | type: replicated
6 | bar:
7 | pg_num: 16
8 | rgw_override_bucket_index_max_shards: 16
9 | rgw_bucket_default_quota_max_objects: 1638400
10 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mgr0
8 |
9 | [osds]
10 | osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
11 | osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
12 | osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
13 |
14 | [mdss]
15 | mds0
16 | mds1
17 | mds2
18 |
19 | [rgws]
20 | rgw0
21 |
22 | [clients]
23 | client0
24 | client1
25 |
26 | #[nfss]
27 | #nfs0
28 |
29 | [rbdmirrors]
30 | rbd-mirror0
31 |
32 | [ceph_monitoring]
33 | mon0
34 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons/hosts-switch-to-containers:
--------------------------------------------------------------------------------
1 | [all:vars]
2 | docker=True
3 |
4 | [mons]
5 | mon0
6 | mon1
7 | mon2
8 |
9 | [mgrs]
10 | mgr0
11 |
12 | [osds]
13 | osd0
14 |
15 | [mdss]
16 | mds0
17 | mds1
18 | mds2
19 |
20 | [rgws]
21 | rgw0
22 |
23 | [clients]
24 | client0
25 |
26 | [monitoring]
27 | mon0
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | user_config: True
3 | copy_admin_key: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | size: 1
8 | test2:
9 | name: "test2"
10 | size: 1
11 | pools:
12 | - "{{ test }}"
13 | - "{{ test2 }}"
14 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: True
3 | crush_rule_config: True
4 | crush_rule_hdd:
5 | name: HDD
6 | root: default
7 | type: host
8 | class: hdd
9 | default: true
10 | crush_rules:
11 | - "{{ crush_rule_hdd }}"
12 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | lvm_volumes:
3 | - data: data-lv1
4 | data_vg: test_group
5 | - data: data-lv2
6 | data_vg: test_group
7 | db: journal1
8 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | rgw_create_pools:
4 | foo:
5 | pg_num: 16
6 | type: replicated
7 | bar:
8 | pg_num: 16
9 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mgr0
8 |
9 | [osds]
10 | osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
11 | osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
12 | osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
13 |
14 | [mdss]
15 | mds0
16 | mds1
17 | mds2
18 |
19 | [rgws]
20 | rgw0
21 |
22 | #[nfss]
23 | #nfs0
24 |
25 | [clients]
26 | client0
27 | client1
28 |
29 | [rbdmirrors]
30 | rbd-mirror0
31 |
32 | [monitoring]
33 | mon0
34 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | user_config: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | size: 1
8 | test2:
9 | name: "test2"
10 | size: 1
11 | pools:
12 | - "{{ test }}"
13 | - "{{ test2 }}"
14 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: True
3 | crush_rule_config: True
4 | crush_rule_hdd:
5 | name: HDD
6 | root: default
7 | type: host
8 | class: hdd
9 | default: true
10 | crush_rules:
11 | - "{{ crush_rule_hdd }}"
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/group_vars/nfss:
--------------------------------------------------------------------------------
1 | copy_admin_key: true
2 | nfs_file_gw: false
3 | nfs_obj_gw: true
4 | ganesha_conf_overrides: |
5 | CACHEINODE {
6 | Entries_HWMark = 100000;
7 | }
8 | nfs_ganesha_stable: true
9 | nfs_ganesha_dev: false
10 | nfs_ganesha_flavor: "ceph_main"
11 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | lvm_volumes:
5 | - data: data-lv1
6 | data_vg: test_group
7 | - data: data-lv2
8 | data_vg: test_group
9 | db: journal1
10 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/group_vars/rgws:
--------------------------------------------------------------------------------
1 | copy_admin_key: true
2 | rgw_create_pools:
3 | foo:
4 | pg_num: 16
5 | type: replicated
6 | bar:
7 | pg_num: 16
8 | rgw_override_bucket_index_max_shards: 16
9 | rgw_bucket_default_quota_max_objects: 1638400
10 |
--------------------------------------------------------------------------------
/tests/functional/all_daemons_ipv6/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mgr0
8 |
9 | [osds]
10 | osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
11 | osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
12 | osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
13 |
14 | [mdss]
15 | mds0
16 | mds1
17 | mds2
18 |
19 | [rgws]
20 | rgw0
21 |
22 | [clients]
23 | client0
24 | client1
25 |
26 | #[nfss]
27 | #nfs0
28 |
29 | [rbdmirrors]
30 | rbd-mirror0
31 |
32 | [ceph_monitoring]
33 | mon0
34 |
--------------------------------------------------------------------------------
/tests/functional/cephadm/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/cephadm/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | public_network: "192.168.30.0/24"
3 | cluster_network: "192.168.31.0/24"
4 | dashboard_admin_password: $sX!cD$rYU6qR^B!
5 | ceph_docker_registry: quay.io
6 | ceph_docker_image: ceph/daemon-base
7 | ceph_docker_image_tag: latest-main-devel
8 | containerized_deployment: true
9 |
--------------------------------------------------------------------------------
/tests/functional/cephadm/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mon0
8 | mon1
9 | mon2
10 |
11 | [osds]
12 | osd0
13 | osd1
14 |
15 | [mdss]
16 | mds0
17 |
18 | [rgws]
19 | rgw0
20 |
21 | [nfss]
22 | nfs0
23 |
24 | [rbdmirrors]
25 | rbd-mirror0
26 |
27 | [monitoring]
28 | mon0
29 |
--------------------------------------------------------------------------------
/tests/functional/cephadm/vagrant_variables.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # DEPLOY CONTAINERIZED DAEMONS
4 | docker: True
5 |
6 | # DEFINE THE NUMBER OF VMS TO RUN
7 | mon_vms: 3
8 | osd_vms: 2
9 | mds_vms: 1
10 | rgw_vms: 1
11 | nfs_vms: 1
12 | grafana_server_vms: 0
13 | rbd_mirror_vms: 1
14 | client_vms: 0
15 | mgr_vms: 0
16 |
17 | # SUBNETS TO USE FOR THE VMS
18 | public_subnet: 192.168.30
19 | cluster_subnet: 192.168.31
20 |
21 | # MEMORY
22 | # set 1024 for CentOS
23 | memory: 2048
24 |
25 | vagrant_box: centos/stream9
26 | # The sync directory changes based on vagrant box
27 | # Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
28 | #vagrant_sync_dir: /home/vagrant/sync
29 | vagrant_sync_dir: /vagrant
30 | # Disables synced folder creation. Not needed for testing, will skip mounting
31 | # the vagrant directory on the remote box regardless of the provider.
32 | vagrant_disable_synced_folder: true
33 |
--------------------------------------------------------------------------------
/tests/functional/collocation/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/collocation/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/collocation/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/collocation/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/collocation/container/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | user_config: True
3 | test:
4 | name: "test"
5 | rule_name: "HDD"
6 | test2:
7 | name: "test2"
8 | rule_name: "HDD"
9 | pools:
10 | - "{{ test }}"
11 | - "{{ test2 }}"
12 |
--------------------------------------------------------------------------------
/tests/functional/collocation/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/collocation/container/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | rgw_create_pools:
3 | foo:
4 | pg_num: 16
5 | type: replicated
6 | bar:
7 | pg_num: 16
8 |
--------------------------------------------------------------------------------
/tests/functional/collocation/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [osds]
7 | osd0
8 | osd1
9 |
10 | [mdss]
11 | mds0
12 | rgw0
13 |
14 | [rgws]
15 | rgw0
16 | mds0
17 |
18 | [rbdmirrors]
19 | rgw0
20 | mds0
21 |
22 | #[nfss]
23 | #rgw0
24 | #mds0
25 |
26 | [monitoring]
27 | mon0
--------------------------------------------------------------------------------
/tests/functional/collocation/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | user_config: True
3 | test:
4 | name: "test"
5 | rule_name: "HDD"
6 | test2:
7 | name: "test2"
8 | rule_name: "HDD"
9 | pools:
10 | - "{{ test }}"
11 | - "{{ test2 }}"
12 |
--------------------------------------------------------------------------------
/tests/functional/collocation/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/collocation/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | rgw_create_pools:
3 | foo:
4 | pg_num: 16
5 | type: replicated
6 | bar:
7 | pg_num: 16
8 |
--------------------------------------------------------------------------------
/tests/functional/collocation/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [osds]
7 | osd0
8 | osd1
9 |
10 | [mdss]
11 | mds0
12 | rgw0
13 |
14 | [rgws]
15 | osd0
16 | rgw0
17 | mds0
18 |
19 | [rbdmirrors]
20 | rgw0
21 | mds0
22 |
23 | #[nfss]
24 | #rgw0
25 | #mds0
26 |
27 | [monitoring]
28 | mon0
--------------------------------------------------------------------------------
/tests/functional/docker2podman/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/docker2podman/ceph-override.json:
--------------------------------------------------------------------------------
1 | {
2 | "ceph_conf_overrides": {
3 | "global": {
4 | "osd_pool_default_pg_num": 12,
5 | "osd_pool_default_size": 1,
6 | "mon_allow_pool_size_one": true,
7 | "mon_warn_on_pool_no_redundancy": false
8 | }
9 | },
10 | "cephfs_pools": [
11 | {
12 | "name": "cephfs_data",
13 | "pg_num": 8,
14 | "pgp_num": 8,
15 | "rule_name": "replicated_rule",
16 | "type": 1,
17 | "erasure_profile": "",
18 | "expected_num_objects": "",
19 | "application": "cephfs",
20 | "size": 3,
21 | "min_size": 0
22 | },
23 | {
24 | "name": "cephfs_metadata",
25 | "pg_num": 8,
26 | "pgp_num": 8,
27 | "rule_name": "replicated_rule",
28 | "type": 1,
29 | "erasure_profile": "",
30 | "expected_num_objects": "",
31 | "application": "cephfs",
32 | "size": 3,
33 | "min_size": 0
34 | }
35 | ],
36 | "ceph_mon_docker_memory_limit": "2g"
37 | }
38 |
--------------------------------------------------------------------------------
/tests/functional/docker2podman/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | user_config: True
3 | copy_admin_key: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | test2:
8 | name: "test2"
9 | rule_name: "HDD"
10 | pools:
11 | - "{{ test }}"
12 | - "{{ test2 }}"
13 |
--------------------------------------------------------------------------------
/tests/functional/docker2podman/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/docker2podman/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
4 | crush_rule_hdd:
5 | name: HDD
6 | root: default
7 | type: host
8 | class: hdd
9 | default: true
10 | crush_rules:
11 | - "{{ crush_rule_hdd }}"
12 |
--------------------------------------------------------------------------------
/tests/functional/docker2podman/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/docker2podman/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | rgw_create_pools:
4 | foo:
5 | pg_num: 16
6 | bar:
7 | pg_num: 16
8 |
--------------------------------------------------------------------------------
/tests/functional/docker2podman/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mgrs]
8 | mon0
9 |
10 | [monitoring]
11 | mon0
--------------------------------------------------------------------------------
/tests/functional/docker2podman/vagrant_variables.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # DEPLOY CONTAINERIZED DAEMONS
4 | docker: True
5 |
6 | # DEFINE THE NUMBER OF VMS TO RUN
7 | mon_vms: 1
8 | osd_vms: 1
9 | mds_vms: 0
10 | rgw_vms: 0
11 | nfs_vms: 0
12 | grafana_server_vms: 0
13 | rbd_mirror_vms: 0
14 | client_vms: 0
15 | mgr_vms: 0
16 |
17 | # SUBNETS TO USE FOR THE VMS
18 | public_subnet: 192.168.58
19 | cluster_subnet: 192.168.59
20 |
21 | # MEMORY
22 | # set 1024 for CentOS
23 | memory: 2048
24 |
25 | vagrant_box: centos/stream9
26 | # The sync directory changes based on vagrant box
27 | # Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
28 | #vagrant_sync_dir: /home/vagrant/sync
29 | vagrant_sync_dir: /vagrant
30 | # Disables synced folder creation. Not needed for testing, will skip mounting
31 | # the vagrant directory on the remote box regardless of the provider.
32 | vagrant_disable_synced_folder: true
33 |
--------------------------------------------------------------------------------
/tests/functional/external_clients/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/external_clients/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/external_clients/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/external_clients/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/external_clients/container/inventory/external_clients-hosts:
--------------------------------------------------------------------------------
1 | [clients]
2 | client0
3 | client1
--------------------------------------------------------------------------------
/tests/functional/external_clients/container/inventory/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | user_config: True
4 | test:
5 | name: "test"
6 | test2:
7 | name: "test2"
8 | pools:
9 | - "{{ test }}"
10 | - "{{ test2 }}"
--------------------------------------------------------------------------------
/tests/functional/external_clients/container/inventory/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mon0
8 |
--------------------------------------------------------------------------------
/tests/functional/external_clients/inventory/external_clients-hosts:
--------------------------------------------------------------------------------
1 | [clients]
2 | client0
3 | client1
--------------------------------------------------------------------------------
/tests/functional/external_clients/inventory/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | user_config: True
4 | test:
5 | name: "test"
6 | test2:
7 | name: "test2"
8 | pools:
9 | - "{{ test }}"
10 | - "{{ test2 }}"
--------------------------------------------------------------------------------
/tests/functional/external_clients/inventory/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mon0
8 |
--------------------------------------------------------------------------------
/tests/functional/external_clients_admin_key.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: clients
3 | gather_facts: false
4 | become: yes
5 | tasks:
6 |
7 | - name: get keys from monitors
8 | command: "{{ 'podman exec ceph-mon-mon0' if containerized_deployment | bool else '' }} ceph --cluster ceph auth get client.admin"
9 | register: _key
10 | delegate_to: "{{ groups.get('mons')[0] }}"
11 | run_once: true
12 |
13 | - name: create /etc/ceph
14 | file:
15 | path: /etc/ceph
16 | state: directory
17 | owner: 167
18 | group: 167
19 | mode: "0755"
20 |
21 | - name: copy ceph key(s) if needed
22 | copy:
23 | dest: "/etc/ceph/ceph.client.admin.keyring"
24 | content: "{{ _key.stdout + '\n' }}"
25 | owner: 167
26 | group: 167
27 | mode: "0600"
28 |
--------------------------------------------------------------------------------
/tests/functional/infra_lv_create/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/infra_lv_create/hosts:
--------------------------------------------------------------------------------
1 | [osds]
2 | osd0
3 |
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # this is only here to let the CI tests know
4 | # that this scenario is using docker
5 | docker: True
6 |
7 | containerized_deployment: True
8 | ceph_origin: repository
9 | ceph_repository: community
10 | cluster: ceph
11 | public_network: "192.168.39.0/24"
12 | cluster_network: "192.168.40.0/24"
13 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
14 | journal_size: 100
15 | osd_objectstore: "bluestore"
16 | crush_device_class: test
17 | copy_admin_key: true
18 | osd_auto_discovery: true
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
27 | handler_health_mon_check_delay: 10
28 | handler_health_osd_check_delay: 10
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
32 |
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ceph_origin: repository
4 | ceph_repository: community
5 | cluster: ceph
6 | public_network: "192.168.39.0/24"
7 | cluster_network: "192.168.40.0/24"
8 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
9 | osd_objectstore: "bluestore"
10 | crush_device_class: test
11 | copy_admin_key: true
12 | osd_auto_discovery: true
13 | os_tuning_params:
14 | - { name: fs.file-max, value: 26234859 }
15 | ceph_conf_overrides:
16 | global:
17 | mon_allow_pool_size_one: true
18 | mon_warn_on_pool_no_redundancy: false
19 | osd_pool_default_size: 1
20 | dashboard_enabled: False
21 | handler_health_mon_check_delay: 10
22 | handler_health_osd_check_delay: 10
--------------------------------------------------------------------------------
/tests/functional/lvm-auto-discovery/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # this is only here to let the CI tests know
4 | # that this scenario is using docker
5 | docker: True
6 |
7 | containerized_deployment: True
8 | ceph_origin: repository
9 | ceph_repository: community
10 | cluster: ceph
11 | public_network: "192.168.39.0/24"
12 | cluster_network: "192.168.40.0/24"
13 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
14 | crush_device_class: test
15 | copy_admin_key: true
16 | devices:
17 | - /dev/sdb
18 | - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00003
19 | os_tuning_params:
20 | - { name: fs.file-max, value: 26234859 }
21 | ceph_conf_overrides:
22 | global:
23 | mon_allow_pool_size_one: true
24 | mon_warn_on_pool_no_redundancy: false
25 | osd_pool_default_size: 1
26 | dashboard_enabled: False
27 | handler_health_mon_check_delay: 10
28 | handler_health_osd_check_delay: 10
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ceph_origin: repository
4 | ceph_repository: community
5 | cluster: ceph
6 | public_network: "192.168.39.0/24"
7 | cluster_network: "192.168.40.0/24"
8 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
9 | crush_device_class: test
10 | copy_admin_key: true
11 | devices:
12 | - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002
13 | - /dev/sdc
14 | os_tuning_params:
15 | - { name: fs.file-max, value: 26234859 }
16 | ceph_conf_overrides:
17 | global:
18 | mon_allow_pool_size_one: true
19 | mon_warn_on_pool_no_redundancy: false
20 | osd_pool_default_size: 1
21 | dashboard_enabled: False
22 | handler_health_mon_check_delay: 10
23 | handler_health_osd_check_delay: 10
--------------------------------------------------------------------------------
/tests/functional/lvm-batch/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # this is only here to let the CI tests know
4 | # that this scenario is using docker
5 | docker: True
6 |
7 | ceph_origin: repository
8 | ceph_repository: community
9 | public_network: "192.168.33.0/24"
10 | cluster_network: "192.168.34.0/24"
11 | copy_admin_key: true
12 | containerized_deployment: true
13 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
14 | os_tuning_params:
15 | - { name: fs.file-max, value: 26234859 }
16 | ceph_conf_overrides:
17 | global:
18 | mon_allow_pool_size_one: true
19 | mon_warn_on_pool_no_redundancy: false
20 | osd_pool_default_size: 1
21 | dashboard_enabled: False
22 | handler_health_mon_check_delay: 10
23 | handler_health_osd_check_delay: 10
24 | ceph_docker_registry: quay.io
25 | ceph_docker_image: ceph/daemon-base
26 | ceph_docker_image_tag: latest-main
27 |
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
6 | osd1 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
7 | osd2 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
8 | osd3 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
9 |
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ceph_origin: repository
4 | ceph_repository: community
5 | public_network: "192.168.39.0/24"
6 | cluster_network: "192.168.40.0/24"
7 | copy_admin_key: true
8 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
9 | os_tuning_params:
10 | - { name: fs.file-max, value: 26234859 }
11 | ceph_conf_overrides:
12 | global:
13 | mon_allow_pool_size_one: true
14 | mon_warn_on_pool_no_redundancy: false
15 | osd_pool_default_size: 1
16 | dashboard_enabled: False
17 | handler_health_mon_check_delay: 10
18 | handler_health_osd_check_delay: 10
19 |
20 |
--------------------------------------------------------------------------------
/tests/functional/lvm-osds/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
6 | osd1 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
7 | osd2 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
8 | osd3 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
9 |
--------------------------------------------------------------------------------
/tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ceph_origin: repository
4 | ceph_repository: community
5 | cluster: test
6 | public_network: "192.168.1.0/24"
7 | cluster_network: "192.168.2.0/24"
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | devices:
11 | - '/dev/sdb'
12 | - '/dev/sdc'
13 | osd_scenario: "collocated"
14 | copy_admin_key: false
15 | os_tuning_params:
16 | - { name: kernel.pid_max, value: 4194303 }
17 | - { name: fs.file-max, value: 26234859 }
18 | ceph_conf_overrides:
19 | global:
20 | osd_pool_default_pg_num: 8
21 | mon_allow_pool_size_one: true
22 | mon_warn_on_pool_no_redundancy: false
23 | osd_pool_default_size: 1
24 | dashboard_admin_password: $sX!cD$rYU6qR^B!
25 | grafana_admin_password: +xFRe+RES@7vg24n
--------------------------------------------------------------------------------
/tests/functional/migrate_ceph_disk_to_ceph_volume/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [osds]
7 | osd0
8 |
9 | [mgrs]
10 | mon0
11 |
--------------------------------------------------------------------------------
/tests/functional/podman/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/podman/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/podman/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | user_config: True
3 | copy_admin_key: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | test2:
8 | name: "test2"
9 | rule_name: "HDD"
10 | pools:
11 | - "{{ test }}"
12 | - "{{ test2 }}"
13 |
--------------------------------------------------------------------------------
/tests/functional/podman/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/podman/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: True
3 | crush_rule_config: True
4 | crush_rule_hdd:
5 | name: HDD
6 | root: default
7 | type: host
8 | class: hdd
9 | default: true
10 | crush_rules:
11 | - "{{ crush_rule_hdd }}"
12 |
--------------------------------------------------------------------------------
/tests/functional/podman/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/podman/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | rgw_create_pools:
4 | foo:
5 | pg_num: 16
6 | type: replicated
7 | bar:
8 |
--------------------------------------------------------------------------------
/tests/functional/podman/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [osds]
7 | osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
8 | osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
9 |
10 | [mdss]
11 | mds0
12 |
13 | [rgws]
14 | rgw0
15 |
16 | #[nfss]
17 | #nfs0
18 |
19 | [clients]
20 | client0
21 | client1
22 |
23 | [rbdmirrors]
24 | rbd-mirror0
25 |
26 | [monitoring]
27 | mon0
28 |
29 | #[all:vars]
30 | #ansible_python_interpreter=/usr/bin/python3
--------------------------------------------------------------------------------
/tests/functional/podman/vagrant_variables.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # DEPLOY CONTAINERIZED DAEMONS
4 | docker: True
5 |
6 | # DEFINE THE NUMBER OF VMS TO RUN
7 | mon_vms: 3
8 | osd_vms: 2
9 | mds_vms: 1
10 | rgw_vms: 1
11 | nfs_vms: 0
12 | grafana_server_vms: 0
13 | rbd_mirror_vms: 1
14 | client_vms: 2
15 | mgr_vms: 0
16 |
17 | # SUBNETS TO USE FOR THE VMS
18 | public_subnet: 192.168.30
19 | cluster_subnet: 192.168.31
20 |
21 | # MEMORY
22 | # set 1024 for CentOS
23 | memory: 2048
24 |
25 | vagrant_box: centos/stream9
26 | # The sync directory changes based on vagrant box
27 | # Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
28 | #vagrant_sync_dir: /home/vagrant/sync
29 | vagrant_sync_dir: /vagrant
30 | # Disables synced folder creation. Not needed for testing, will skip mounting
31 | # the vagrant directory on the remote box regardless of the provider.
32 | vagrant_disable_synced_folder: true
33 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | docker: True
3 | containerized_deployment: true
4 | ceph_origin: repository
5 | ceph_repository: community
6 | cluster: ceph
7 | public_network: "192.168.144.0/24"
8 | cluster_network: "192.168.145.0/24"
9 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
10 | journal_size: 100
11 | osd_objectstore: "bluestore"
12 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
13 | lvm_volumes:
14 | - data: data-lv1
15 | data_vg: test_group
16 | - data: data-lv2
17 | data_vg: test_group
18 | db: journal1
19 | db_vg: journals
20 | os_tuning_params:
21 | - { name: fs.file-max, value: 26234859 }
22 | ceph_conf_overrides:
23 | global:
24 | mon_allow_pool_size_one: true
25 | mon_warn_on_pool_no_redundancy: false
26 | osd_pool_default_size: 1
27 | mon_max_pg_per_osd: 512
28 | dashboard_enabled: False
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
32 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [mgrs]
5 | mon0
6 |
7 | [osds]
8 | osd0
9 |
10 | [rbdmirrors]
11 | osd0
12 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/container/secondary/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/container/secondary/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | docker: True
3 | containerized_deployment: true
4 | ceph_origin: repository
5 | ceph_repository: community
6 | cluster: ceph
7 | public_network: "192.168.146.0/24"
8 | cluster_network: "192.168.147.0/24"
9 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
10 | journal_size: 100
11 | osd_objectstore: "bluestore"
12 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
13 | lvm_volumes:
14 | - data: data-lv1
15 | data_vg: test_group
16 | - data: data-lv2
17 | data_vg: test_group
18 | db: journal1
19 | db_vg: journals
20 | os_tuning_params:
21 | - { name: fs.file-max, value: 26234859 }
22 | ceph_conf_overrides:
23 | global:
24 | mon_allow_pool_size_one: true
25 | mon_warn_on_pool_no_redundancy: false
26 | osd_pool_default_size: 1
27 | mon_max_pg_per_osd: 512
28 | dashboard_enabled: False
29 | ceph_docker_registry: quay.io
30 | ceph_docker_image: ceph/daemon-base
31 | ceph_docker_image_tag: latest-main
32 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/container/secondary/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [mgrs]
5 | mon0
6 |
7 | [osds]
8 | osd0
9 |
10 | [rbdmirrors]
11 | osd0
12 |
13 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.140.0/24"
6 | cluster_network: "192.168.141.0/24"
7 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
11 | lvm_volumes:
12 | - data: data-lv1
13 | data_vg: test_group
14 | - data: data-lv2
15 | data_vg: test_group
16 | db: journal1
17 | db_vg: journals
18 | os_tuning_params:
19 | - { name: fs.file-max, value: 26234859 }
20 | ceph_conf_overrides:
21 | global:
22 | mon_allow_pool_size_one: true
23 | mon_warn_on_pool_no_redundancy: false
24 | osd_pool_default_size: 1
25 | mon_max_pg_per_osd: 512
26 | dashboard_enabled: False
27 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [mgrs]
5 | mon0
6 |
7 | [osds]
8 | osd0
9 |
10 | [rbdmirrors]
11 | osd0
12 |
13 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/secondary/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/secondary/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | cluster: ceph
5 | public_network: "192.168.142.0/24"
6 | cluster_network: "192.168.143.0/24"
7 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
8 | journal_size: 100
9 | osd_objectstore: "bluestore"
10 | # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
11 | lvm_volumes:
12 | - data: data-lv1
13 | data_vg: test_group
14 | - data: data-lv2
15 | data_vg: test_group
16 | db: journal1
17 | db_vg: journals
18 | os_tuning_params:
19 | - { name: fs.file-max, value: 26234859 }
20 | ceph_conf_overrides:
21 | global:
22 | mon_allow_pool_size_one: true
23 | mon_warn_on_pool_no_redundancy: false
24 | osd_pool_default_size: 1
25 | mon_max_pg_per_osd: 512
26 | dashboard_enabled: False
27 |
--------------------------------------------------------------------------------
/tests/functional/rbdmirror/secondary/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [mgrs]
5 | mon0
6 |
7 | [osds]
8 | osd0
9 |
10 | [rbdmirrors]
11 | osd0
12 |
13 |
--------------------------------------------------------------------------------
/tests/functional/reboot.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | gather_facts: true
4 | tasks:
5 | - name: reboot the machines
6 | reboot:
7 | reboot_timeout: 180
8 | test_command: uptime
9 | become: yes
10 |
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # this is only here to let the CI tests know
3 | # that this scenario is using docker
4 | docker: True
5 |
6 | containerized_deployment: True
7 | ceph_mon_docker_subnet: "{{ public_network }}"
8 | public_network: "192.168.79.0/24"
9 | cluster_network: "192.168.80.0/24"
10 | ceph_conf_overrides:
11 | global:
12 | mon_allow_pool_size_one: true
13 | mon_warn_on_pool_no_redundancy: false
14 | osd_pool_default_size: 1
15 | dashboard_enabled: False
16 | copy_admin_key: True
17 | ceph_docker_registry: quay.io
18 | ceph_docker_image: ceph/daemon-base
19 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mdss]
8 | mds0
9 |
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | public_network: "192.168.77.0/24"
5 | cluster_network: "192.168.78.0/24"
6 | radosgw_interface: eth1
7 | journal_size: 100
8 | osd_objectstore: "bluestore"
9 | copy_admin_key: true
10 | ceph_conf_overrides:
11 | global:
12 | mon_allow_pool_size_one: true
13 | mon_warn_on_pool_no_redundancy: false
14 | osd_pool_default_size: 1
15 | dashboard_enabled: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | osd_objectstore: "bluestore"
5 | lvm_volumes:
6 | - data: data-lv1
7 | data_vg: test_group
8 | - data: data-lv2
9 | data_vg: test_group
10 | db: journal1
11 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_mds/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mdss]
8 | mds0
9 |
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # this is only here to let the CI tests know
3 | # that this scenario is using docker
4 | docker: True
5 |
6 | containerized_deployment: True
7 | ceph_mon_docker_subnet: "{{ public_network }}"
8 | public_network: "192.168.83.0/24"
9 | cluster_network: "192.168.84.0/24"
10 | ceph_conf_overrides:
11 | global:
12 | mon_allow_pool_size_one: true
13 | mon_warn_on_pool_no_redundancy: false
14 | osd_pool_default_size: 1
15 | dashboard_enabled: False
16 | ceph_docker_registry: quay.io
17 | ceph_docker_image: ceph/daemon-base
18 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mgrs]
8 | mgr0
9 | mgr1
10 |
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | public_network: "192.168.81.0/24"
5 | cluster_network: "192.168.82.0/24"
6 | radosgw_interface: eth1
7 | ceph_conf_overrides:
8 | global:
9 | mon_allow_pool_size_one: true
10 | mon_warn_on_pool_no_redundancy: false
11 | osd_pool_default_size: 1
12 | dashboard_enabled: False
13 |
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | osd_objectstore: "bluestore"
5 | lvm_volumes:
6 | - data: data-lv1
7 | data_vg: test_group
8 | - data: data-lv2
9 | data_vg: test_group
10 | db: journal1
11 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_mgr/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [mgrs]
8 | mgr0
9 | mgr1
10 |
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # this is only here to let the CI tests know
3 | # that this scenario is using docker
4 | docker: True
5 |
6 | containerized_deployment: True
7 | ceph_mon_docker_subnet: "{{ public_network }}"
8 | public_network: "192.168.17.0/24"
9 | cluster_network: "192.168.18.0/24"
10 | ceph_conf_overrides:
11 | global:
12 | mon_allow_pool_size_one: true
13 | mon_warn_on_pool_no_redundancy: false
14 | osd_pool_default_size: 1
15 | dashboard_enabled: False
16 | ceph_docker_registry: quay.io
17 | ceph_docker_image: ceph/daemon-base
18 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [osds]
7 | osd0
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | public_network: "192.168.1.0/24"
5 | cluster_network: "192.168.2.0/24"
6 | ceph_conf_overrides:
7 | global:
8 | mon_allow_pool_size_one: true
9 | mon_warn_on_pool_no_redundancy: false
10 | osd_pool_default_size: 1
11 | dashboard_enabled: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | osd_objectstore: "bluestore"
5 | lvm_volumes:
6 | - data: data-lv1
7 | data_vg: test_group
8 | - data: data-lv2
9 | data_vg: test_group
10 | db: journal1
11 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [osds]
7 | osd0
--------------------------------------------------------------------------------
/tests/functional/shrink_mon/hosts-switch-to-containers:
--------------------------------------------------------------------------------
1 | [all:vars]
2 | docker=True
3 |
4 | [mons]
5 | mon0
6 | mon1
7 | mon2
8 |
9 | [osds]
10 | osd0
11 |
12 | [mdss]
13 | mds0
14 |
15 | [rgws]
16 | rgw0
17 |
18 | [clients]
19 | client0
20 |
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # this is only here to let the CI tests know
3 | # that this scenario is using docker
4 | docker: True
5 |
6 | containerized_deployment: True
7 | ceph_mon_docker_subnet: "{{ public_network }}"
8 | public_network: "192.168.73.0/24"
9 | cluster_network: "192.168.74.0/24"
10 | ceph_conf_overrides:
11 | global:
12 | mon_allow_pool_size_one: true
13 | mon_warn_on_pool_no_redundancy: false
14 | osd_pool_default_size: 1
15 | dashboard_enabled: False
16 | copy_admin_key: True
17 | ceph_docker_registry: quay.io
18 | ceph_docker_image: ceph/daemon-base
19 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
6 | osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
7 |
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | public_network: "192.168.71.0/24"
5 | cluster_network: "192.168.72.0/24"
6 | ceph_conf_overrides:
7 | global:
8 | osd_pool_default_size: 3
9 | dashboard_enabled: False
10 | copy_admin_key: True
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 |
--------------------------------------------------------------------------------
/tests/functional/shrink_osd/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0 monitor_address=192.168.71.10
3 |
4 | [osds]
5 | osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
6 | osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
7 |
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # this is only here to let the CI tests know
3 | # that this scenario is using docker
4 | docker: True
5 | public_network: "192.168.87.0/24"
6 | cluster_network: "192.168.88.0/24"
7 | containerized_deployment: True
8 | ceph_mon_docker_subnet: "{{ public_network }}"
9 | ceph_conf_overrides:
10 | global:
11 | mon_allow_pool_size_one: true
12 | mon_warn_on_pool_no_redundancy: false
13 | osd_pool_default_size: 1
14 | dashboard_enabled: False
15 | copy_admin_key: True
16 | ceph_docker_registry: quay.io
17 | ceph_docker_image: ceph/daemon-base
18 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rbdmirrors]
8 | rbd-mirror0
9 |
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | public_network: "192.168.85.0/24"
5 | cluster_network: "192.168.86.0/24"
6 | osd_objectstore: "bluestore"
7 | copy_admin_key: true
8 | ceph_conf_overrides:
9 | global:
10 | mon_allow_pool_size_one: true
11 | mon_warn_on_pool_no_redundancy: false
12 | osd_pool_default_size: 1
13 | dashboard_enabled: False
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | osd_objectstore: "bluestore"
5 | lvm_volumes:
6 | - data: data-lv1
7 | data_vg: test_group
8 | - data: data-lv2
9 | data_vg: test_group
10 | db: journal1
11 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_rbdmirror/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rbdmirrors]
8 | rbd-mirror0
9 |
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../../all_daemons/ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/container/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # this is only here to let the CI tests know
3 | # that this scenario is using docker
4 | docker: True
5 |
6 | containerized_deployment: True
7 | public_network: "192.168.91.0/24"
8 | cluster_network: "192.168.92.0/24"
9 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
10 | ceph_mon_docker_subnet: "{{ public_network }}"
11 | ceph_conf_overrides:
12 | global:
13 | mon_allow_pool_size_one: true
14 | mon_warn_on_pool_no_redundancy: false
15 | osd_pool_default_size: 1
16 | dashboard_enabled: False
17 | copy_admin_key: True
18 | ceph_docker_registry: quay.io
19 | ceph_docker_image: ceph/daemon-base
20 | ceph_docker_image_tag: latest-main
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | lvm_volumes:
4 | - data: data-lv1
5 | data_vg: test_group
6 | - data: data-lv2
7 | data_vg: test_group
8 | db: journal1
9 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/container/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: true
3 | rgw_create_pools:
4 | foo:
5 | pg_num: 16
6 | type: replicated
7 | bar:
8 | pg_num: 16
9 | rgw_override_bucket_index_max_shards: 16
10 | rgw_bucket_default_quota_max_objects: 1638400
11 |
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rgws]
8 | rgw0
9 |
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | ceph_origin: repository
3 | ceph_repository: community
4 | public_network: "192.168.89.0/24"
5 | cluster_network: "192.168.90.0/24"
6 | radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
7 | osd_objectstore: "bluestore"
8 | copy_admin_key: true
9 | ceph_conf_overrides:
10 | global:
11 | mon_allow_pool_size_one: true
12 | mon_warn_on_pool_no_redundancy: false
13 | osd_pool_default_size: 1
14 | dashboard_enabled: False
15 |
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: False
3 | crush_rule_config: False
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | osd_objectstore: "bluestore"
5 | lvm_volumes:
6 | - data: data-lv1
7 | data_vg: test_group
8 | - data: data-lv2
9 | data_vg: test_group
10 | db: journal1
11 | db_vg: journals
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: true
3 | rgw_create_pools:
4 | foo:
5 | pg_num: 16
6 | type: replicated
7 | bar:
8 | pg_num: 16
9 | rgw_override_bucket_index_max_shards: 16
10 | rgw_bucket_default_quota_max_objects: 1638400
11 |
--------------------------------------------------------------------------------
/tests/functional/shrink_rgw/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 |
4 | [osds]
5 | osd0
6 |
7 | [rgws]
8 | rgw0
9 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/subset_update/ceph-override.json:
--------------------------------------------------------------------------------
1 | {
2 | "ceph_conf_overrides": {
3 | "global": {
4 | "auth_allow_insecure_global_id_reclaim": false,
5 | "osd_pool_default_pg_num": 12,
6 | "osd_pool_default_size": 1,
7 | "mon_allow_pool_size_one": true,
8 | "mon_warn_on_pool_no_redundancy": false,
9 | "mon_max_pg_per_osd": 300
10 | }
11 | }
12 | ],
13 | "ceph_mon_docker_memory_limit": "2g",
14 | "radosgw_num_instances": 2
15 | }
16 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../../../../Vagrantfile
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/ceph-override.json:
--------------------------------------------------------------------------------
1 | ../ceph-override.json
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | user_config: True
3 | copy_admin_key: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | size: 1
8 | test2:
9 | name: "test2"
10 | size: 1
11 | pools:
12 | - "{{ test }}"
13 | - "{{ test2 }}"
14 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: false
3 | crush_rule_config: false
4 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | osd_objectstore: "bluestore"
3 | devices:
4 | - /dev/sda
5 | - /dev/sdb
6 | - /dev/sdc
7 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/group_vars/rgws:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | rgw_create_pools:
4 | foo:
5 | pg_num: 16
6 | type: replicated
7 | bar:
8 | pg_num: 16
9 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/container/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mon0
8 | mon1
9 |
10 | [osds]
11 | osd0
12 | osd1
13 | osd2
14 |
15 | [rgws]
16 | rgw0
17 | rgw1
18 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/group_vars/clients:
--------------------------------------------------------------------------------
1 | ---
2 | copy_admin_key: True
3 | user_config: True
4 | test:
5 | name: "test"
6 | rule_name: "HDD"
7 | size: 1
8 | test2:
9 | name: "test2"
10 | size: 1
11 | pools:
12 | - "{{ test }}"
13 | - "{{ test2 }}"
14 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/group_vars/iscsigws:
--------------------------------------------------------------------------------
1 | ---
2 | generate_crt: True
3 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/group_vars/mons:
--------------------------------------------------------------------------------
1 | ---
2 | create_crush_tree: false
3 | crush_rule_config: false
4 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/group_vars/nfss:
--------------------------------------------------------------------------------
1 | copy_admin_key: true
2 | nfs_file_gw: false
3 | nfs_obj_gw: true
4 | ganesha_conf_overrides: |
5 | CACHEINODE {
6 | Entries_HWMark = 100000;
7 | }
8 | nfs_ganesha_stable: true
9 | nfs_ganesha_dev: false
10 | nfs_ganesha_flavor: "ceph_main"
11 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/group_vars/osds:
--------------------------------------------------------------------------------
1 | ---
2 | os_tuning_params:
3 | - { name: fs.file-max, value: 26234859 }
4 | osd_objectstore: "bluestore"
5 | devices:
6 | - /dev/sda
7 | - /dev/sdb
8 | - /dev/sdc
9 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/group_vars/rgws:
--------------------------------------------------------------------------------
1 | copy_admin_key: true
2 | rgw_create_pools:
3 | foo:
4 | pg_num: 16
5 | type: replicated
6 | bar:
7 | pg_num: 16
8 | rgw_override_bucket_index_max_shards: 16
9 | rgw_bucket_default_quota_max_objects: 1638400
10 |
--------------------------------------------------------------------------------
/tests/functional/subset_update/hosts:
--------------------------------------------------------------------------------
1 | [mons]
2 | mon0
3 | mon1
4 | mon2
5 |
6 | [mgrs]
7 | mon0
8 | mon1
9 |
10 | [osds]
11 | osd0
12 | osd1
13 | osd2
14 |
15 | [rgws]
16 | rgw0
17 | rgw1
18 |
19 |
--------------------------------------------------------------------------------
/tests/functional/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/tests/functional/tests/__init__.py
--------------------------------------------------------------------------------
/tests/functional/tests/grafana/test_grafana.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
4 | class TestGrafanas(object):
5 |
6 | @pytest.mark.dashboard
7 | @pytest.mark.no_docker
8 | def test_grafana_dashboard_is_installed(self, node, host):
9 | assert host.package("ceph-grafana-dashboards").is_installed
10 |
11 | @pytest.mark.dashboard
12 | @pytest.mark.parametrize('svc', [
13 | 'alertmanager', 'grafana-server', 'prometheus'
14 | ])
15 | def test_grafana_service_enabled_and_running(self, node, host, svc):
16 | s = host.service(svc)
17 | assert s.is_enabled
18 | assert s.is_running
19 |
20 | @pytest.mark.dashboard
21 | @pytest.mark.parametrize('port', [
22 | '3000', '9092', '9093'
23 | ])
24 | def test_grafana_socket(self, node, host, setup, port):
25 | s = host.socket('tcp://%s:%s' % (setup["address"], port))
26 | assert s.is_listening
27 |
--------------------------------------------------------------------------------
/tests/functional/tests/mds/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/tests/functional/tests/mds/__init__.py
--------------------------------------------------------------------------------
/tests/functional/tests/mds/test_mds.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import json
3 |
4 |
5 | class TestMDSs(object):
6 |
7 | @pytest.mark.no_docker
8 | def test_mds_is_installed(self, node, host):
9 | assert host.package("ceph-mds").is_installed
10 |
11 | def test_mds_service_enabled_and_running(self, node, host):
12 | service_name = "ceph-mds@{hostname}".format(
13 | hostname=node["vars"]["inventory_hostname"]
14 | )
15 | s = host.service(service_name)
16 | assert s.is_enabled
17 | assert s.is_running
18 |
19 | def test_mds_is_up(self, node, setup, ceph_status):
20 | cluster = setup["cluster_name"]
21 | name = 'client.bootstrap-mds'
22 | output = ceph_status(f'/var/lib/ceph/bootstrap-mds/{cluster}.keyring', name=name)
23 | cluster_status = json.loads(output)
24 | assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get( # noqa E501
25 | 'up:standby', 0)) == len(node["vars"]["groups"]["mdss"])
26 |
--------------------------------------------------------------------------------
/tests/functional/tests/mgr/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/tests/functional/tests/mgr/__init__.py
--------------------------------------------------------------------------------
/tests/functional/tests/mon/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/tests/functional/tests/mon/__init__.py
--------------------------------------------------------------------------------
/tests/functional/tests/node-exporter/test_node_exporter.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
4 | class TestNodeExporter(object):
5 |
6 | @pytest.mark.dashboard
7 | def test_node_exporter_service_enabled_and_running(self, node, host):
8 | s = host.service("node_exporter")
9 | assert s.is_enabled
10 | assert s.is_running
11 |
12 | @pytest.mark.dashboard
13 | def test_node_exporter_socket(self, node, host):
14 | assert host.socket('tcp://9100').is_listening
15 |
--------------------------------------------------------------------------------
/tests/functional/tests/osd/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/tests/functional/tests/osd/__init__.py
--------------------------------------------------------------------------------
/tests/functional/tests/rbd-mirror/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/tests/functional/tests/rbd-mirror/__init__.py
--------------------------------------------------------------------------------
/tests/functional/tests/rgw/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-ansible/5b11adade2153732cec0759918b0dee52063f978/tests/functional/tests/rgw/__init__.py
--------------------------------------------------------------------------------
/tests/inventories/single-machine.yml:
--------------------------------------------------------------------------------
1 | [mons]
2 | localhost
3 |
4 | [osds]
5 | localhost
6 |
7 | [rgws]
8 | localhost
9 |
10 | [mdss]
11 | localhost
12 |
--------------------------------------------------------------------------------
/tests/library/ca_test_common.py:
--------------------------------------------------------------------------------
1 | from ansible.module_utils import basic
2 | from ansible.module_utils._text import to_bytes
3 | import json
4 |
5 |
6 | def set_module_args(args):
7 | if '_ansible_remote_tmp' not in args:
8 | args['_ansible_remote_tmp'] = '/tmp'
9 | if '_ansible_keep_remote_files' not in args:
10 | args['_ansible_keep_remote_files'] = False
11 |
12 | args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
13 | basic._ANSIBLE_ARGS = to_bytes(args)
14 |
15 |
16 | class AnsibleExitJson(Exception):
17 | pass
18 |
19 |
20 | class AnsibleFailJson(Exception):
21 | pass
22 |
23 |
24 | def exit_json(*args, **kwargs):
25 | raise AnsibleExitJson(kwargs)
26 |
27 |
28 | def fail_json(*args, **kwargs):
29 | raise AnsibleFailJson(kwargs)
30 |
--------------------------------------------------------------------------------
/tests/pytest.ini:
--------------------------------------------------------------------------------
1 | # this is just a placeholder so that we can define what the 'root' of the tests
2 | # dir really is.
3 | [pytest]
4 | markers =
5 | ceph_exporter: environment with ceph exporter enabled
6 | ceph_crash: environment with ceph crash enabled
7 | dashboard: environment with dashboard enabled
8 | no_docker: environment without containers
9 | docker: environment with containers
10 | all: for all nodes
11 | mdss: for mds nodes
12 | mgrs: for mgr nodes
13 | mons: for mon nodes
14 | nfss: for nfs nodes
15 | osds: for osd nodes
16 | rbdmirrors: for rbdmirror nodes
17 | rgws: for rgw nodes
18 | grafanas: for grafana nodes
19 |
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | # These are Python requirements needed to run the functional tests
2 | pytest-testinfra
3 | pytest-xdist
4 | pytest
5 | ansible-core>=2.15,<2.17,!=2.9.10
6 | netaddr
7 | mock
8 | jmespath
9 | pytest-rerunfailures
10 | pytest-cov
11 | setuptools
12 |
--------------------------------------------------------------------------------
/tests/scripts/generate_ssh_config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Generate a custom ssh config from Vagrant so that it can then be used by
3 | # ansible.cfg
4 |
5 | path=$1
6 |
7 | if [ $# -eq 0 ]
8 | then
9 | echo "A path to the scenario is required as an argument and it wasn't provided"
10 | exit 1
11 | fi
12 |
13 | cd "$path"
14 |
15 | # Let's print vagrant status for debug purposes and to give the VMs a second to
16 | # settle before asking vagrant for SSH config.
17 | vagrant status || true
18 |
19 | n=0
20 | until [ "$n" -ge 5 ]
21 | do
22 | vagrant ssh-config > vagrant_ssh_config && break
23 | n=$((n+1))
24 | echo "\`vagrant ssh-config\` failed. Retrying."
25 | sleep 3
26 | done
27 |
28 | if [ "$n" -eq 5 ]; then
29 | echo "\`vagrant ssh-config\` failed 5 times. This is a fatal error."
30 | cat vagrant_ssh_config
31 | exit 1
32 | fi
33 |
--------------------------------------------------------------------------------
/tests/scripts/vagrant_up.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 | if [[ -n $1 ]]; then
4 | DIRECTORY=$1
5 | shift
6 | else
7 | DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
8 | fi
9 | pushd "${DIRECTORY}"
10 |
11 | if [[ "${CEPH_ANSIBLE_VAGRANT_BOX}" =~ "centos/stream" ]]; then
12 | EL_VERSION="${CEPH_ANSIBLE_VAGRANT_BOX: -1}"
13 | LATEST_IMAGE="$(curl -s https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/CHECKSUM | sed -nE 's/^SHA256.*\((.*-([0-9]+).*vagrant-libvirt.box)\).*$/\1/p' | sort -u | tail -n1)"
14 | vagrant box remove "${CEPH_ANSIBLE_VAGRANT_BOX}" --all --force || true
15 | vagrant box add --force --provider libvirt --name "${CEPH_ANSIBLE_VAGRANT_BOX}" "https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/${LATEST_IMAGE}" --force
16 | fi
17 |
18 | retries=0
19 | until [ $retries -ge 5 ]
20 | do
21 | echo "Attempting to start VMs. Attempts: $retries"
22 | timeout 10m time vagrant up "$@" && break
23 | retries=$((retries+1))
24 | sleep 5
25 | done
26 |
27 | sleep 10
28 | popd
29 |
--------------------------------------------------------------------------------
/tests/scripts/workflows/signed-off.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 |
4 | if [[ "$(git log --oneline --no-merges origin/"${GITHUB_BASE_REF}"..HEAD | wc -l)" -ne "$(git log --no-merges origin/"${GITHUB_BASE_REF}"..HEAD | grep -c Signed-off-by)" ]]; then
5 | echo "One or more commits is/are missing a Signed-off-by. Add it with 'git commit -s'."
6 | exit 1
7 | else
8 | echo "Sign-off ok!"
9 | fi
--------------------------------------------------------------------------------