├── .coveragerc ├── .github └── workflows │ ├── crmsh-cd.yml │ ├── crmsh-ci.yml │ └── test-container-image.yml ├── .gitignore ├── .hgignore ├── .vscode └── settings.json ├── AUTHORS ├── COPYING ├── ChangeLog ├── MANIFEST.in ├── Makefile.am ├── NEWS ├── README.md ├── TODO ├── autogen.sh ├── bin └── crm ├── codecov.yml ├── configure.ac ├── contrib ├── README.vimsyntax ├── bash_completion.sh ├── git-hook-pre-commit ├── pcmk-ftdetect.vim ├── pcmk.vim ├── pygments_crmsh_lexers │ ├── __init__.py │ ├── ansiclr.py │ └── crmsh.py └── setup.py ├── crmsh.spec.in ├── crmsh.tmpfiles.d.conf ├── crmsh ├── __init__.py ├── bootstrap.py ├── cache.py ├── cibconfig.py ├── cibquery.py ├── cibstatus.py ├── cibverify.py ├── clidisplay.py ├── cliformat.py ├── cluster_fs.py ├── cmd_status.py ├── command.py ├── completers.py ├── config.py ├── constants.py ├── corosync.py ├── corosync_config_format.py ├── crash_test │ ├── __init__.py │ ├── check.py │ ├── config.py │ ├── explain.py │ ├── main.py │ ├── task.py │ └── utils.py ├── crm_gv.py ├── crm_pssh.py ├── handles.py ├── healthcheck.py ├── help.py ├── history.py ├── idmgmt.py ├── iproute2.py ├── lock.py ├── log.py ├── log_patterns.py ├── logparser.py ├── logtime.py ├── main.py ├── migration-unsupported-resource-agents.txt ├── migration.py ├── minieval.py ├── options.py ├── orderedset.py ├── pacemaker.py ├── parallax.py ├── parse.py ├── prun │ ├── __init__.py │ ├── prun.py │ └── runner.py ├── pyshim.py ├── qdevice.py ├── ra.py ├── report │ ├── __init__.py │ ├── collect.py │ ├── constants.py │ ├── core.py │ ├── sh.py │ └── utils.py ├── rsctest.py ├── sbd.py ├── schema.py ├── scripts.py ├── service_manager.py ├── sh.py ├── ssh_key.py ├── template.py ├── term.py ├── tmpfiles.py ├── ui_assist.py ├── ui_cib.py ├── ui_cibstatus.py ├── ui_cluster.py ├── ui_configure.py ├── ui_context.py ├── ui_corosync.py ├── ui_history.py ├── ui_maintenance.py ├── ui_node.py ├── ui_options.py ├── ui_ra.py ├── ui_resource.py ├── ui_root.py ├── ui_sbd.py ├── ui_script.py ├── ui_site.py ├── ui_template.py ├── ui_utils.py ├── upgradeutil.py ├── user_of_host.py ├── userdir.py ├── utils.py ├── watchdog.py └── xmlutil.py ├── data-manifest ├── doc ├── .gitignore ├── Makefile ├── bootstrap-howto.md ├── bootstrap-todo.md ├── crm.8.adoc ├── crmsh_crm_report.8.adoc ├── development.md ├── profiles.adoc ├── releasing-a-new-version.md ├── sort-doc.py ├── toolchain │ ├── Containerfile │ ├── bin │ │ ├── adocaio │ │ ├── adocxt │ │ └── help2adoc │ └── lib │ │ └── help2adoc │ │ ├── __init__.py │ │ ├── generator.py │ │ ├── main.py │ │ └── parser.py └── website-v1 │ ├── .gitignore │ ├── 404.adoc │ ├── Makefile │ ├── about.adoc │ ├── configuration.adoc │ ├── crm.conf │ ├── crmold.conf │ ├── css │ ├── crm.css │ ├── font-awesome.css │ └── font-awesome.min.css │ ├── development.adoc │ ├── documentation.adoc │ ├── download.adoc │ ├── faq.adoc │ ├── fonts │ ├── FontAwesome.otf │ ├── fontawesome-webfont.eot │ ├── fontawesome-webfont.svg │ ├── fontawesome-webfont.ttf │ └── fontawesome-webfont.woff │ ├── history-guide.adoc │ ├── img │ ├── history-guide │ │ ├── sample-cluster.conf.png │ │ └── smallapache-start.png │ ├── icons │ │ ├── README │ │ ├── callouts │ │ │ ├── 1.png │ │ │ ├── 10.png │ │ │ ├── 11.png │ │ │ ├── 12.png │ │ │ ├── 13.png │ │ │ ├── 14.png │ │ │ ├── 15.png │ │ │ ├── 2.png │ │ │ ├── 3.png │ │ │ ├── 4.png │ │ │ ├── 5.png │ │ │ ├── 6.png │ │ │ ├── 7.png │ │ │ ├── 8.png │ │ │ └── 9.png │ │ ├── caution.png │ │ ├── example.png │ │ ├── home.png │ │ ├── important.png │ │ ├── next.png │ │ ├── note.png │ │ ├── 
prev.png │ │ ├── tip.png │ │ ├── up.png │ │ └── warning.png │ ├── laptop.png │ ├── loader.gif │ └── servers.gif │ ├── include │ └── history-guide │ │ ├── basic-transition.typescript │ │ ├── diff.typescript │ │ ├── info.typescript │ │ ├── nfs-probe-err.typescript │ │ ├── resource-trace.typescript │ │ ├── resource.typescript │ │ ├── sample-cluster.conf.crm │ │ ├── status-probe-fail.typescript │ │ ├── stonith-corosync-stopped.typescript │ │ └── transition-log.typescript │ ├── index.adoc │ ├── installation.adoc │ ├── make-news.py │ ├── man-1.2.adoc │ ├── man-2.0.adoc │ ├── man-3.adoc │ ├── man-4.3.adoc │ ├── man-4.6.adoc │ ├── news │ ├── 2014-06-30-release-2_1.adoc │ ├── 2014-10-28-release-2_1_1.adoc │ ├── 2015-01-26-release-2_1_2.adoc │ ├── 2015-04-10-release-2_1_3.adoc │ ├── 2015-05-13-release-2_1_4.adoc │ ├── 2015-05-25-getting-started-jp.adoc │ ├── 2016-01-12-release-2_1_5.adoc │ ├── 2016-01-15-release-2_2_0.adoc │ ├── 2016-04-28-release-2_2_1.adoc │ ├── 2016-08-12-release-2_3_0.adoc │ ├── 2016-09-01-release-2_1_7.adoc │ ├── 2016-09-02-release-2_3_1.adoc │ ├── 2016-09-05-release-2_2_2.adoc │ ├── 2017-01-31-release-3_0_0.adoc │ └── 2021-06-17-release-4_3_1.adoc │ ├── postprocess.py │ ├── rsctest-guide.adoc │ ├── scripts.adoc │ └── start-guide.adoc ├── etc ├── crm.conf.in └── profiles.yml ├── high-availability.xml ├── pylint.toml ├── pytest.ini ├── requirements.txt ├── scripts ├── apache │ └── main.yml ├── check-uptime │ ├── fetch.py │ ├── main.yml │ └── report.py ├── cryptctl │ ├── README.md │ └── main.yml ├── database │ └── main.yml ├── db2-hadr │ └── main.yml ├── db2 │ └── main.yml ├── drbd │ └── main.yml ├── exportfs │ └── main.yml ├── filesystem │ └── main.yml ├── gfs2-base │ └── main.yml ├── gfs2 │ └── main.yml ├── haproxy │ ├── haproxy.cfg │ └── main.yml ├── health │ ├── collect.py │ ├── hahealth.py │ ├── main.yml │ └── report.py ├── libvirt │ └── main.yml ├── lvm-drbd │ └── main.yml ├── lvm │ └── main.yml ├── mailto │ └── main.yml ├── nfsserver-lvm-drbd │ └── main.yml ├── nfsserver │ └── main.yml ├── nginx │ └── main.yml ├── oracle │ └── main.yml ├── raid-lvm │ └── main.yml ├── raid1 │ └── main.yml ├── sap-as │ └── main.yml ├── sap-ci │ └── main.yml ├── sap-db │ └── main.yml ├── sap-simple-stack-plus │ └── main.yml ├── sap-simple-stack │ └── main.yml ├── sapdb │ └── main.yml ├── sapinstance │ └── main.yml ├── sbd-device │ └── main.yml ├── sbd │ └── main.yml ├── virtual-ip │ └── main.yml └── vmware │ └── main.yml ├── setup.py ├── templates ├── apache ├── filesystem ├── gfs2 ├── gfs2-base ├── sbd └── virtual-ip ├── test ├── README.regression ├── bugs-test.txt ├── cib-tests.sh ├── cibtests │ ├── 001.exp.xml │ ├── 001.input │ ├── 002.exp.xml │ ├── 002.input │ ├── 003.exp.xml │ ├── 003.input │ ├── 004.exp.xml │ ├── 004.input │ └── shadow.base ├── crm-interface ├── defaults ├── descriptions ├── evaltest.sh ├── features │ ├── bootstrap_bugs.feature │ ├── bootstrap_firewalld.feature │ ├── bootstrap_init_join_remove.feature │ ├── bootstrap_options.feature │ ├── bootstrap_sbd_delay.feature │ ├── bootstrap_sbd_normal.feature │ ├── cluster_api.feature │ ├── cluster_blocking_ssh.feature │ ├── configure_bugs.feature │ ├── constraints_bugs.feature │ ├── corosync_ui.feature │ ├── coverage │ │ ├── coveragerc │ │ └── sitecustomize.py │ ├── crm_report_bugs.feature │ ├── crm_report_normal.feature │ ├── environment.py │ ├── geo_setup.feature │ ├── gfs2.feature │ ├── healthcheck.feature │ ├── migration.feature │ ├── ocfs2.feature │ ├── qdevice_options.feature │ ├── qdevice_setup_remove.feature │ ├── 
qdevice_usercase.feature │ ├── qdevice_validate.feature │ ├── resource_failcount.feature │ ├── resource_set.feature │ ├── sbd_ui.feature │ ├── ssh_agent.feature │ ├── steps │ │ ├── __init__.py │ │ ├── behave_agent.py │ │ ├── const.py │ │ ├── step_implementation.py │ │ └── utils.py │ └── user_access.feature ├── history-test.tar.bz2 ├── list-undocumented-commands.py ├── profile-history.sh ├── regression.sh ├── run-functional-tests ├── testcases │ ├── acl │ ├── acl.excl │ ├── acl.exp │ ├── basicset │ ├── bugs │ ├── bugs.exp │ ├── bundle │ ├── bundle.exp │ ├── commit │ ├── commit.exp │ ├── common.excl │ ├── common.filter │ ├── confbasic │ ├── confbasic-xml │ ├── confbasic-xml.exp │ ├── confbasic-xml.filter │ ├── confbasic.exp │ ├── delete │ ├── delete.exp │ ├── edit │ ├── edit.excl │ ├── edit.exp │ ├── file │ ├── file.exp │ ├── history │ ├── history.excl │ ├── history.exp │ ├── history.post │ ├── history.pre │ ├── newfeatures │ ├── newfeatures.exp │ ├── node │ ├── node.exp │ ├── options │ ├── options.exp │ ├── ra │ ├── ra.exp │ ├── ra.filter │ ├── resource │ ├── resource.exp │ ├── rset │ ├── rset-xml │ ├── rset-xml.exp │ ├── rset.exp │ ├── scripts │ ├── scripts.exp │ ├── scripts.filter │ ├── shadow │ ├── shadow.exp │ └── xmlonly.sh ├── unittests │ ├── __init__.py │ ├── pacemaker.log │ ├── pacemaker.log.2 │ ├── pacemaker_unicode.log │ ├── schemas │ │ ├── acls-1.1.rng │ │ ├── acls-1.2.rng │ │ ├── constraints-1.0.rng │ │ ├── constraints-1.1.rng │ │ ├── constraints-1.2.rng │ │ ├── fencing.rng │ │ ├── nvset.rng │ │ ├── pacemaker-1.0.rng │ │ ├── pacemaker-1.1.rng │ │ ├── pacemaker-1.2.rng │ │ ├── resources-1.0.rng │ │ ├── resources-1.1.rng │ │ ├── resources-1.2.rng │ │ ├── rule.rng │ │ ├── score.rng │ │ └── versions.rng │ ├── scripts │ │ ├── inc1 │ │ │ └── main.yml │ │ ├── inc2 │ │ │ └── main.yml │ │ ├── legacy │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── apache.xml │ │ │ └── virtual-ip.xml │ │ ├── unified │ │ │ └── main.yml │ │ ├── v2 │ │ │ └── main.yml │ │ ├── vip │ │ │ └── main.yml │ │ ├── vipinc │ │ │ └── main.yml │ │ └── workflows │ │ │ └── 10-webserver.xml │ ├── test.conf │ ├── test_bootstrap.py │ ├── test_bugs.py │ ├── test_cib.py │ ├── test_cibquery.py │ ├── test_cliformat.py │ ├── test_cluster_fs.py │ ├── test_corosync.py │ ├── test_corosync_config_format.py │ ├── test_crashtest_check.py │ ├── test_crashtest_main.py │ ├── test_crashtest_task.py │ ├── test_crashtest_utils.py │ ├── test_gv.py │ ├── test_handles.py │ ├── test_lock.py │ ├── test_migration.py │ ├── test_objset.py │ ├── test_parallax.py │ ├── test_parse.py │ ├── test_prun.py │ ├── test_qdevice.py │ ├── test_ratrace.py │ ├── test_report_collect.py │ ├── test_report_core.py │ ├── test_report_sh.py │ ├── test_report_utils.py │ ├── test_sbd.py │ ├── test_scripts.py │ ├── test_service_manager.py │ ├── test_sh.py │ ├── test_time.py │ ├── test_ui_cluster.py │ ├── test_ui_corosync.py │ ├── test_ui_sbd.py │ ├── test_upgradeuitl.py │ ├── test_utils.py │ ├── test_watchdog.py │ └── test_xmlutil.py └── update-expected-output.sh ├── test_container ├── Dockerfile ├── behave-agent.socket ├── behave-agent@.service └── behave_agent.py ├── tox.ini ├── update-data-manifest.sh ├── utils ├── crm_clean.py ├── crm_init.py ├── crm_pkg.py ├── crm_rpmcheck.py └── crm_script.py └── version.in /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | */tests/* 4 | */test/* 5 | *setup.py* 6 | tests/* 7 | -------------------------------------------------------------------------------- 
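A minimal sketch of how the [run] omit globs above take effect through the coverage.py API; the measured import below is only an illustration, anything executed between start() and stop() is counted:

import coverage

cov = coverage.Coverage(config_file=".coveragerc")  # picks up the omit globs above
cov.start()
import crmsh.cache  # example workload; anything executed here is measured
cov.stop()
cov.report()  # paths matching */tests/*, */test/* or *setup.py* are excluded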
/.github/workflows/crmsh-cd.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | # For more information about secrets see: https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets 4 | 5 | name: crmsh CD 6 | 7 | on: push 8 | 9 | env: 10 | PACKAGE_NAME: crmsh 11 | CONTAINER_IMAGE: nyang23/obs-continuous-delivery:latest 12 | OBS_USER: ${{ secrets.OBS_USER }} 13 | OBS_PASS: ${{ secrets.OBS_PASS }} 14 | OBS_PROJECT: network:ha-clustering:Unstable 15 | TARGET_PROJECT: network:ha-clustering:Factory 16 | 17 | jobs: 18 | integration: 19 | if: github.repository == 'ClusterLabs/crmsh' && github.ref_name == 'master' 20 | uses: ./.github/workflows/crmsh-ci.yml 21 | secrets: inherit 22 | 23 | delivery: 24 | needs: integration 25 | runs-on: ubuntu-24.04 26 | timeout-minutes: 10 27 | steps: 28 | - uses: actions/checkout@v4 29 | - name: delivery process 30 | run: | 31 | docker pull "${CONTAINER_IMAGE}" 32 | docker run -t -v "$(pwd)":/package:ro \ 33 | -e OBS_USER=$OBS_USER \ 34 | -e OBS_PASS=$OBS_PASS \ 35 | -e OBS_PROJECT=$OBS_PROJECT \ 36 | -e PACKAGE_NAME=$PACKAGE_NAME \ 37 | "${CONTAINER_IMAGE}" \ 38 | /bin/bash -c "cp -r /package ~/package && cd ~/package && /scripts/upload.sh" 39 | 40 | submit: 41 | needs: delivery 42 | runs-on: ubuntu-24.04 43 | timeout-minutes: 10 44 | steps: 45 | - uses: actions/checkout@v4 46 | - name: submit process 47 | run: | 48 | docker pull "${CONTAINER_IMAGE}" 49 | docker run -t \ 50 | -e OBS_USER=$OBS_USER \ 51 | -e OBS_PASS=$OBS_PASS \ 52 | -e OBS_PROJECT=$OBS_PROJECT \ 53 | -e PACKAGE_NAME=$PACKAGE_NAME \ 54 | -e TARGET_PROJECT=$TARGET_PROJECT \ 55 | "${CONTAINER_IMAGE}" \ 56 | /bin/bash -c "cd ~ && /scripts/submit.sh" 57 | -------------------------------------------------------------------------------- /.github/workflows/test-container-image.yml: -------------------------------------------------------------------------------- 1 | name: test container CI 2 | on: 3 | push: 4 | paths: 5 | - "test_container/**" 6 | pull_request: 7 | paths: 8 | - "test_container/**" 9 | schedule: 10 | - cron: "31 4 * * SUN" 11 | jobs: 12 | build: 13 | #if: github.repository == 'ClusterLabs/crmsh' && github.ref_name == 'master' 14 | runs-on: ubuntu-24.04 15 | defaults: 16 | run: 17 | working-directory: ./test_container 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: build container image 21 | run: podman image build -t haleap:ci . 
22 | - name: push container image 23 | if: ${{ github.ref_name == 'master' }} 24 | env: 25 | DOCKER_IO_ACCESS_TOKEN: ${{ secrets.DOCKER_IO_ACCESS_TOKEN }} 26 | run: | 27 | echo "$DOCKER_IO_ACCESS_TOKEN" | podman login --username ${{ vars.DOCKER_IO_USERNAME }} --password-stdin docker.io 28 | podman image tag haleap:ci docker.io/nyang23/haleap:${{ github.ref_name }} 29 | podman image push docker.io/nyang23/haleap:${{ github.ref_name }} 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *~ 3 | #*.*# 4 | .#* 5 | doc/website-v1/gen 6 | Makefile.in 7 | autom4te.cache 8 | ./Makefile 9 | aclocal.m4 10 | autoconf 11 | autoheader 12 | automake 13 | config.log 14 | config.status 15 | configure 16 | crm.conf 17 | crmsh.spec 18 | install-sh 19 | missing 20 | version 21 | crmsh.egg-info/* 22 | crmtestout/* 23 | doc/crm.8 24 | doc/crm.8.html 25 | doc/crmsh_hb_report.8 26 | doc/crmsh_hb_report.8.html 27 | hb_report/hb_report 28 | patches/* 29 | build/* 30 | 31 | # Tool specific files 32 | .README.md.html 33 | .*.*~ 34 | .project 35 | .settings 36 | .pydevproject 37 | .coverage 38 | 39 | contrib/build/ 40 | contrib/dist/ 41 | contrib/pygments_crmsh_lexers.egg-info/ 42 | 43 | .tox/ 44 | -------------------------------------------------------------------------------- /.hgignore: -------------------------------------------------------------------------------- 1 | syntax: glob 2 | 3 | *.pyc 4 | *~ 5 | #*.*# 6 | doc/gen 7 | 8 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.linting.pylintEnabled": true, 3 | "python.linting.flake8Enabled": false, 4 | "python.linting.enabled": true, 5 | "python.pythonPath": "/usr/bin/python3" 6 | } -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | NOTE: The work of everyone on this project is dearly appreciated. If you 2 | are not listed here but should be, please notify us! 3 | 4 | afederic 5 | Adam Spiers 6 | Andrei Maruha 7 | Andrew Beekhof 8 | Bin Liu 9 | Borislav Borisov 10 | Christian Seiler 11 | Daniel Hoffend 12 | Dejan Muhamedagic 13 | dougcahill 14 | Eric Ren 15 | Federica Teodori 16 | Florian Haas 17 | Goldwyn Rodrigues 18 | Hideo Yamauchi 19 | Holger Teutsch 20 | Igor Tsiglyar 21 | Kai Kang 22 | Kazunori INOUE 23 | Keisuke MORI 24 | Kristoffer Gronlund 25 | Larry Chen 26 | Lars Ellenberg 27 | Lars Marowsky-Brée 28 | Marc A. 
Smith 29 | Michael Prokop 30 | Motaharu Kobu 31 | NAKAHIRA Kazutomo 32 | Nate Clark 33 | nozawat 34 | Pedro Salgado 35 | Peter Schwindt 36 | Richard B Winters 37 | seabres 38 | Tim Serong 39 | Thomas Rohlajz 40 | Valentin Vidic 41 | Vincenzo Pii 42 | Vladislav Bogdanov 43 | Xia Li 44 | Xin Liang 45 | Xinwei Hu 46 | Yan Gao 47 | Yuusuke IIDA 48 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include crmsh *.txt 2 | -------------------------------------------------------------------------------- /NEWS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/NEWS -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | Features 2 | 3 | . Audit 4 | 5 | - add user auditing, i.e. save all commands that were run 6 | (DONE: see the -R flag) 7 | 8 | - save to a local file (distributed DB would probably be an 9 | overkill) 10 | 11 | . Cluster documentation 12 | 13 | - one of the more recent features is graph capability 14 | (graphviz) which is a very good step in terms of cluster 15 | documentation; need to extend that with some textual 16 | cluster description and perhaps history and such 17 | 18 | - everybody likes reports (and in particular your boss) 19 | 20 | - this feature needs very careful consideration 21 | 22 | . CIB features 23 | 24 | - Support ACL commands in Pacemaker 1.1.12> 25 | (DONE) 26 | 27 | . Command features 28 | 29 | - Relative commands: /status from configure, ../resource stop foo 30 | from configure, cib/new from configure... for example. 31 | 32 | Tricky part: Have to push/pop levels invisibly, resource 33 | commands modify CIB while CIB is edited in configure. Similar 34 | races could occur with other commands. -------------------------------------------------------------------------------- /bin/crm: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # crmsh, command line interface for Linux HA clusters 4 | # Copyright (C) 2008-2015 Dejan Muhamedagic 5 | # Copyright (C) 2013-2015 Kristoffer Gronlund 6 | # 7 | # This program is free software; you can redistribute it and/or modify 8 | # it under the terms of the GNU General Public License as published by 9 | # the Free Software Foundation; either version 2 of the License, or 10 | # (at your option) any later version. 11 | # 12 | # This program is distributed in the hope that it will be useful, 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | # GNU General Public License for more details. 16 | # 17 | # You should have received a copy of the GNU General Public License along 18 | # with this program; if not, write to the Free Software Foundation, Inc., 19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
20 | # 21 | 22 | import sys 23 | 24 | if int(sys.version[0]) < 3: 25 | sys.stderr.write("Abort: crmsh only supports python3\n") 26 | sys.exit(-1) 27 | 28 | try: 29 | from crmsh import log 30 | if '-h' not in sys.argv and '--help' not in sys.argv: 31 | log.setup_logging() 32 | else: 33 | log.setup_logging(only_help=True) 34 | 35 | from crmsh import main 36 | except ImportError as msg: 37 | sys.stderr.write('''Fatal error: 38 | %s 39 | 40 | Failed to start crmsh! This is likely due to: 41 | - A missing dependency (e.g. the corresponding python3 version) 42 | - A broken installation 43 | 44 | If you are using a packaged version of crmsh, please try 45 | reinstalling the package. Also check your PYTHONPATH and 46 | make sure that the crmsh module is reachable. 47 | 48 | Please file an issue describing your installation at 49 | https://github.com/Clusterlabs/crmsh/issues/ . 50 | ''' % (msg)) 51 | sys.exit(-1) 52 | 53 | rc = main.run() 54 | sys.exit(rc) 55 | 56 | # vim:ts=4:sw=4:et: 57 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | threshold: 0.35% 6 | patch: 7 | default: 8 | threshold: 0.35% 9 | codecov: 10 | notify: 11 | after_n_builds: 31 12 | comment: 13 | after_n_builds: 31 14 | layout: "condensed_header, flags, files, condensed_footer" 15 | -------------------------------------------------------------------------------- /configure.ac: -------------------------------------------------------------------------------- 1 | dnl 2 | dnl autoconf for crmsh 3 | dnl 4 | dnl Copyright (C) 2015 Kristoffer Gronlund 5 | dnl Copyright (C) 2008 Andrew Beekhof 6 | dnl 7 | dnl License: GNU General Public License (GPL) 8 | 9 | AC_PREREQ([2.53]) 10 | 11 | AC_INIT([crmsh],[5.0.0],[users@clusterlabs.org]) 12 | 13 | AC_ARG_WITH(version, 14 | [ --with-version=version Override package version (if you're a packager needing to pretend) ], 15 | [ PACKAGE_VERSION="$withval" ]) 16 | 17 | AC_ARG_WITH(pkg-name, 18 | [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], 19 | [ PACKAGE_NAME="$withval" ]) 20 | 21 | OCF_ROOT_DIR="/usr/lib/ocf" 22 | AC_ARG_WITH(ocf-root, 23 | [ --with-ocf-root=DIR directory for OCF scripts [${OCF_ROOT_DIR}]], 24 | [ if test x"$withval" = xprefix; then OCF_ROOT_DIR=${prefix}; else 25 | OCF_ROOT_DIR="$withval"; fi ]) 26 | 27 | AC_ARG_WITH(daemon-user, 28 | [ --with-daemon-user=USER_NAME 29 | User to run privileged non-root things as. [default=hacluster] ], 30 | [ CRM_DAEMON_USER="$withval" ], 31 | [ CRM_DAEMON_USER="hacluster" ]) 32 | 33 | AM_INIT_AUTOMAKE([no-define foreign]) 34 | m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES]) 35 | AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE_NAME") 36 | AC_DEFINE_UNQUOTED(VERSION, "$PACKAGE_VERSION") 37 | 38 | dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from
When a failure occurs, it will then display the full 40 | dnl command line 41 | dnl Wrap in m4_ifdef to avoid breaking on older platforms 42 | m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES]) 43 | 44 | AC_SUBST(OCF_ROOT_DIR) 45 | AC_SUBST(CRM_DAEMON_USER) 46 | 47 | CRM_CACHE_DIR=${localstatedir}/cache/crm 48 | AC_DEFINE_UNQUOTED(CRM_CACHE_DIR,"$CRM_CACHE_DIR", Where crm shell keeps the cache) 49 | AC_SUBST(CRM_CACHE_DIR) 50 | 51 | AM_PATH_PYTHON([3]) 52 | AC_PATH_PROGS(ASCIIDOC, asciidoc) 53 | 54 | AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"") 55 | 56 | AC_CONFIG_FILES(Makefile \ 57 | etc/crm.conf \ 58 | version \ 59 | crmsh.spec \ 60 | ) 61 | 62 | AC_OUTPUT 63 | -------------------------------------------------------------------------------- /contrib/README.vimsyntax: -------------------------------------------------------------------------------- 1 | There were two VIM syntax files contributed: 2 | 3 | pacemaker-crm.vim 4 | pcmk.vim 5 | 6 | The first one got removed because it didn't work with newer CRM 7 | shell syntax anymore; most of the text was highlighted as "Error". 8 | 9 | Neither matches colours used in crm configure show and both need 10 | to be improved. Still, you may want to edit a more colorful 11 | configuration. To have that in "crm configure edit" do the 12 | following: 13 | 14 | 1. Copy pcmk.vim to ~/.vim/syntax/pcmk.vim. 15 | 16 | 2. Make sure the following is added to your VIM rc file 17 | (~/.vimrc or ~/.exrc): 18 | 19 | syntax on 20 | set modeline 21 | set modelines=5 22 | 23 | 3. Copy pcmk-ftdetect.vim to ~/.vim/ftdetect/ so that 24 | files are identified automatically. 25 | 26 | 27 | If you're editing a file directly, just type: 28 | 29 | :setf pcmk 30 | 31 | Many thanks to the contributors: 32 | 33 | Trevor Hemsley 34 | Dan Frincu 35 | Lars Ellenberg 36 | -------------------------------------------------------------------------------- /contrib/git-hook-pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # An example hook script to verify what is about to be committed. 4 | # Called by "git commit" with no arguments. The hook should 5 | # exit with non-zero status after issuing an appropriate message if 6 | # it wants to stop the commit. 7 | # 8 | # To enable this hook, rename this file to "pre-commit". 9 | 10 | root="$(git rev-parse --show-toplevel)" 11 | [ -d "$root" ] || exit 1 12 | 13 | ./update-data-manifest.sh 14 | git add ./data-manifest 15 | -------------------------------------------------------------------------------- /contrib/pcmk-ftdetect.vim: -------------------------------------------------------------------------------- 1 | " test for match at first character 2 | au BufNewFile,BufRead * if match(getline(1), 'node ')==0 | set ft=pcmk | endif 3 | -------------------------------------------------------------------------------- /contrib/pygments_crmsh_lexers/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from .ansiclr import ANSIColorsLexer 3 | from .crmsh import CrmshLexer 4 | -------------------------------------------------------------------------------- /contrib/pygments_crmsh_lexers/ansiclr.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | pygments.lexers.console 4 | ~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | Lexers for misc console output.
7 | 8 | :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. 9 | :license: BSD, see LICENSE for details. 10 | """ 11 | from __future__ import unicode_literals 12 | 13 | from pygments.lexer import RegexLexer, bygroups 14 | from pygments.token import Generic, Text 15 | 16 | __all__ = ['ANSIColorsLexer'] 17 | 18 | _ESC = "\x1b\[" 19 | # this is normally to reset (reset attributes, set primary font) 20 | # there could be however other reset sequences and in that case 21 | # sgr0 needs to be updated 22 | _SGR0 = "%s(?:0;10|10;0)m" % _ESC 23 | # BLACK RED GREEN YELLOW 24 | # BLUE MAGENTA CYAN WHITE 25 | _ANSI_COLORS = (Generic.Emph, Generic.Error, Generic.Inserted, Generic.Keyword, 26 | Generic.Keyword, Generic.Prompt, Generic.Traceback, Generic.Output) 27 | 28 | 29 | def _ansi2rgb(lexer, match): 30 | code = match.group(1) 31 | text = match.group(2) 32 | yield match.start(), _ANSI_COLORS[int(code)-30], text 33 | 34 | 35 | class ANSIColorsLexer(RegexLexer): 36 | """ 37 | Interpret ANSI colors. 38 | """ 39 | name = 'ANSI Colors' 40 | aliases = ['ansiclr'] 41 | filenames = ["*.typescript"] 42 | 43 | tokens = { 44 | 'root': [ 45 | (r'%s(3[0-7]+)m(.*?)%s' % (_ESC, _SGR0), _ansi2rgb), 46 | (r'[^\x1b]+', Text), 47 | # drop the rest of the graphic codes 48 | (r'(%s[0-9;]+m)()' % _ESC, bygroups(None, Text)), 49 | ] 50 | } 51 | -------------------------------------------------------------------------------- /contrib/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import unicode_literals 4 | from setuptools import setup 5 | 6 | setup(name='pygments-crmsh-lexers', 7 | version='0.0.5', 8 | description='Pygments crmsh custom lexers.', 9 | keywords='pygments crmsh lexer', 10 | license='BSD', 11 | 12 | author='Kristoffer Gronlund', 13 | author_email='kgronlund@suse.com', 14 | 15 | url='https://github.com/ClusterLabs/crmsh', 16 | 17 | packages=['pygments_crmsh_lexers'], 18 | install_requires=['pygments>=2.0.2'], 19 | 20 | entry_points='''[pygments.lexers] 21 | ANSIColorsLexer=pygments_crmsh_lexers:ANSIColorsLexer 22 | CrmshLexer=pygments_crmsh_lexers:CrmshLexer''', 23 | 24 | classifiers=[ 25 | 'Environment :: Plugins', 26 | 'Intended Audience :: Developers', 27 | 'License :: OSI Approved :: BSD License', 28 | 'Operating System :: OS Independent', 29 | 'Programming Language :: Python', 30 | 'Programming Language :: Python :: 2', 31 | 'Programming Language :: Python :: 3', 32 | 'Topic :: Software Development :: Libraries :: Python Modules', 33 | ],) 34 | -------------------------------------------------------------------------------- /crmsh.tmpfiles.d.conf: -------------------------------------------------------------------------------- 1 | d /var/log/crmsh 0775 hacluster haclient - 2 | -------------------------------------------------------------------------------- /crmsh/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is required for python packages. 2 | # It is intentionally empty. 3 | -------------------------------------------------------------------------------- /crmsh/cache.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2008-2011 Dejan Muhamedagic 2 | # Copyright (C) 2018 Kristoffer Gronlund 3 | # See COPYING for license information. 4 | # 5 | # Cache stuff. A naive implementation. 6 | # Used by ra.py to cache named lists of things. 
7 | 8 | import time 9 | 10 | 11 | _max_cache_age = 600.0 # seconds 12 | _stamp = time.time() 13 | _lists = {} 14 | 15 | 16 | def _clear(): 17 | "Clear the cache." 18 | global _stamp 19 | global _lists 20 | _stamp = time.time() 21 | _lists = {} 22 | 23 | 24 | def is_cached(name): 25 | "True if the argument exists in the cache." 26 | return retrieve(name) is not None 27 | 28 | 29 | def store(name, lst): 30 | """ 31 | Stores the given list for the given name. 32 | Returns the given list. 33 | """ 34 | _lists[name] = lst 35 | return lst 36 | 37 | 38 | def retrieve(name): 39 | """ 40 | Returns the cached list for name, or None. 41 | """ 42 | if time.time() - _stamp > _max_cache_age: 43 | _clear() 44 | return _lists.get(name) 45 | 46 | 47 | # vim:ts=4:sw=4:et: 48 | --------------------------------------------------------------------------------
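A minimal usage sketch for the cache module above; expensive_list_classes() is a hypothetical stand-in for a slow lookup:

from crmsh import cache

def ra_classes():
    lst = cache.retrieve("ra_classes")
    if lst is None:  # miss, or the cache expired and was flushed
        lst = cache.store("ra_classes", expensive_list_classes())
    return lst

Note that retrieve() flushes the entire cache once _max_cache_age (600 seconds) has passed, so callers should always be prepared for a None result.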
/crmsh/cibverify.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2014 Kristoffer Gronlund 2 | # See COPYING for license information. 3 | 4 | import re 5 | from .sh import ShellUtils 6 | from . import log 7 | 8 | 9 | logger = log.setup_logger(__name__) 10 | cib_verify = "crm_verify -VV -p" 11 | VALIDATE_RE = re.compile(r"^Entity: line (\d+): element (\w+): " + 12 | r"Relax-NG validity error : (.+)$") 13 | 14 | 15 | def _prettify(line, indent=0): 16 | m = VALIDATE_RE.match(line) 17 | if m: 18 | return "%s%s (%s): %s" % (indent*' ', m.group(2), m.group(1), m.group(3)) 19 | return line 20 | 21 | 22 | def verify(cib): 23 | rc, _, stderr = ShellUtils().get_stdout_stderr(cib_verify, cib.encode('utf-8')) 24 | for i, line in enumerate(line for line in stderr.split('\n') if line): 25 | indent = 0 if i == 0 else 7 26 | print(_prettify(line, indent)) 27 | return rc 28 | -------------------------------------------------------------------------------- /crmsh/crash_test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/crmsh/crash_test/__init__.py -------------------------------------------------------------------------------- /crmsh/crash_test/config.py: -------------------------------------------------------------------------------- 1 | FENCE_TIMEOUT = 60 2 | FENCE_NODE = "crm_attribute -t status -N '{}' -n terminate -v true" 3 | BLOCK_IP = '''iptables -{action} INPUT -s {peer_ip} -j DROP; 4 | iptables -{action} OUTPUT -d {peer_ip} -j DROP''' 5 | REMOVE_PORT = "firewall-cmd --zone=public --remove-port={port}/udp" 6 | ADD_PORT = "firewall-cmd --zone=public --add-port={port}/udp" 7 | FENCE_HISTORY = "stonith_admin -h {node}" 8 | SBD_CONF = "/etc/sysconfig/sbd" 9 | SBD_CHECK_CMD = "sbd -d {dev} dump" 10 | -------------------------------------------------------------------------------- /crmsh/crash_test/explain.py: -------------------------------------------------------------------------------- 1 | contents = {} 2 | 3 | contents["sbd"] = '''On {nodeA}, once the sbd process gets killed, there are two situations: 4 | a) sbd process restarted 5 | Systemd will restart the sbd service immediately. 6 | Restarting the sbd service will also restart the corosync and pacemaker services because of the pre-defined dependencies among the systemd unit files. 7 | 8 | b) {nodeA} experiences the watchdog fencing 9 | There is a race condition with the watchdog timer. The watchdog might reset {nodeA} if the sbd service does not get restarted and tickle the watchdog timer in time.''' 10 | 11 | contents["sbd-l"] = '''On {nodeA}, the sbd service is killed consistently all the time. 12 | Very quickly, systemd will hit the start limit for restarting the sbd service. 13 | Basically, in the end, systemd stops restarting it and marks the sbd service as failed. 14 | {nodeB} sbd cluster health check marks it as "UNHEALTHY". 15 | {nodeB} treats {nodeA} as a node lost, and fences it in the end.''' 16 | 17 | contents["corosync"] = '''On {nodeA}, once the corosync process gets killed, systemd will restart the corosync service immediately. There are two situations: 18 | a) corosync process restarts 19 | The {nodeA} corosync process gets restarted and rejoins the existing membership quickly enough. 20 | Basically, it happens before {nodeB} treats it as a node lost. 21 | In the end, the cluster looks like nothing happened to the user. RA stays safe and sound. 22 | 23 | b) {nodeA} gets fenced 24 | {nodeA} gets fenced since {nodeB} corosync just ran out of timeout, treats it as a node lost, and forms a new membership. 25 | The decision-making process of {nodeB}, pengine (a.k.a. schedulerd in Pacemaker 2), will initiate a fence action against {nodeA}. ''' 26 | 27 | contents["corosync-l"] = '''The corosync service is killed consistently all the time. 28 | Very quickly, systemd will hit the start limit for restarting the corosync service. 29 | Basically, in the end, systemd stops restarting it and marks the corosync service as failed. {nodeB} treats {nodeA} as a node lost, marks it as "unclean", and fences it in the end.''' 30 | 31 | contents["pacemakerd"] = '''The pacemakerd process gets restarted by systemd. All RAs must stay intact.''' 32 | -------------------------------------------------------------------------------- /crmsh/iproute2.py: -------------------------------------------------------------------------------- 1 | """Interface to iproute2 commands""" 2 | import dataclasses 3 | import ipaddress 4 | 5 | 6 | @dataclasses.dataclass 7 | class IPInterface: 8 | ifname: str 9 | flags: set[str] 10 | addr_info: set[ipaddress.IPv4Interface | ipaddress.IPv6Interface] 11 | 12 | 13 | class IPAddr: 14 | def __init__(self, json: list): 15 | # json: the output of 'ip -j addr' 16 | self._json = json 17 | 18 | def interfaces(self) -> list[IPInterface]: 19 | return [ 20 | IPInterface( 21 | interface['ifname'], 22 | set(interface['flags']), 23 | { 24 | ipaddress.ip_interface(f'{x["local"]}/{x["prefixlen"]}') 25 | for x in interface['addr_info'] 26 | }, 27 | ) 28 | for interface in self._json 29 | ] 30 | --------------------------------------------------------------------------------
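A minimal sketch of how IPAddr above is meant to be fed, assuming the iproute2 `ip` binary is available; error handling is omitted:

import json
import subprocess

from crmsh.iproute2 import IPAddr

out = subprocess.run(["ip", "-j", "addr"], capture_output=True, check=True).stdout
for nic in IPAddr(json.loads(out)).interfaces():
    print(nic.ifname, sorted(nic.flags), nic.addr_info)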
/crmsh/migration-unsupported-resource-agents.txt: -------------------------------------------------------------------------------- 1 | ocf:heartbeat:AoEtarget 2 | ocf:heartbeat:AudibleAlarm 3 | ocf:heartbeat:EvmsSCC 4 | ocf:heartbeat:Evmsd 5 | ocf:heartbeat:ICP 6 | ocf:heartbeat:IPaddr,ocf:heartbeat:IPaddr2 7 | ocf:heartbeat:IPv6addr,ocf:heartbeat:IPaddr2 8 | ocf:heartbeat:LVM,ocf:heartbeat:LVM-activate,deprecated 9 | ocf:heartbeat:LinuxSCSI 10 | ocf:heartbeat:ManageRAID 11 | ocf:heartbeat:ManageVE 12 | ocf:heartbeat:Pure-FTPd 13 | ocf:heartbeat:Raid1,ocf:heartbeat:mdraid,deprecated 14 | ocf:heartbeat:ServeRAID 15 | ocf:heartbeat:SphinxSearchDaemon 16 | ocf:heartbeat:SysInfo 17 | ocf:heartbeat:VIPArip 18 | ocf:heartbeat:WinPopup 19 | ocf:heartbeat:Xen 20 | ocf:heartbeat:asterisk 21 | ocf:heartbeat:clvm 22 | ocf:heartbeat:conntrackd 23 | ocf:heartbeat:dnsupdate 24 | ocf:heartbeat:dovecot 25 | ocf:heartbeat:eDir88 26 | ocf:heartbeat:fio 27 | ocf:heartbeat:gcp-vpc-move-route 28 | ocf:heartbeat:ids 29 | ocf:heartbeat:ipsec 30 | ocf:heartbeat:iscsi 31 | ocf:heartbeat:jboss 32 | ocf:heartbeat:jira 33 | ocf:heartbeat:kamailio 34 | ocf:heartbeat:lxc 35 | ocf:heartbeat:minio 36 | ocf:heartbeat:nagios 37 | ocf:heartbeat:ocivip 38 | ocf:heartbeat:openstack-cinder-volume 39 | ocf:heartbeat:openstack-floating-ip 40 | ocf:heartbeat:openstack-info 41 | ocf:heartbeat:openstack-virtual-ip 42 | ocf:heartbeat:lxd-info 43 | ocf:heartbeat:machine-info 44 | ocf:heartbeat:pingd,ocf:pacemaker:ping 45 | ocf:heartbeat:pound 46 | ocf:heartbeat:proftpd 47 | ocf:heartbeat:rkt 48 | ocf:heartbeat:rsyslog 49 | ocf:heartbeat:scsi2reservation 50 | ocf:heartbeat:smb-share 51 | ocf:heartbeat:sybaseASE 52 | ocf:heartbeat:syslog-ng 53 | ocf:heartbeat:varnish 54 | ocf:heartbeat:vdo-vol 55 | ocf:heartbeat:vmware 56 | ocf:heartbeat:vsftpd 57 | ocf:heartbeat:zabbixserver 58 | ocf:pacemaker:o2cb 59 | stonith:fence_amt 60 | stonith:fence_compute 61 | stonith:fence_docker 62 | stonith:fence_evacuate 63 | stonith:fence_ldom 64 | stonith:fence_legacy 65 | stonith:fence_openstack 66 | stonith:fence_powerman 67 | stonith:fence_rhevm 68 | stonith:fence_xenapi 69 | stonith:fence_zvm 70 | stonith:external/sbd,stonith:fence_sbd 71 | -------------------------------------------------------------------------------- /crmsh/options.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2008-2011 Dejan Muhamedagic 2 | # Copyright (C) 2013 Kristoffer Gronlund 3 | # See COPYING for license information. 4 | ''' 5 | Session-only options (not saved). 6 | ''' 7 | 8 | interactive = False 9 | batch = False 10 | ask_no = False 11 | regression_tests = False 12 | profile = "" 13 | history = "live" 14 | input_file = "" 15 | shadow = "" 16 | scriptdir = "" 17 | # set to true when completing non-interactively 18 | shell_completion = False 19 | -------------------------------------------------------------------------------- /crmsh/prun/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/crmsh/prun/__init__.py -------------------------------------------------------------------------------- /crmsh/pyshim.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | 4 | try: 5 | from functools import cache 6 | except ImportError: 7 | def cache(f): 8 | cached_return_value = dict() 9 | 10 | @functools.wraps(f) 11 | def wrapper(*args, **kwargs): 12 | nonlocal cached_return_value 13 | key = (tuple(args), tuple(sorted(kwargs.items()))) 14 | try: 15 | return cached_return_value[key] 16 | except KeyError: 17 | ret = f(*args, **kwargs) 18 | cached_return_value[key] = ret 19 | return ret 20 | 21 | return wrapper 22 | --------------------------------------------------------------------------------
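The shim above mirrors functools.cache for interpreters older than Python 3.9, where it was introduced. A minimal usage sketch; fetch_metadata() is hypothetical, and all arguments must be hashable for the key to work:

from crmsh.pyshim import cache

@cache
def agent_metadata(agent_name):
    return fetch_metadata(agent_name)  # computed once per distinct agent_name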
/crmsh/report/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/crmsh/report/__init__.py -------------------------------------------------------------------------------- /crmsh/tmpfiles.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2013 Kristoffer Gronlund 2 | # Copyright (C) 2008-2011 Dejan Muhamedagic 3 | # See COPYING for license information. 4 | ''' 5 | Files added to tmpfiles are removed at program exit. 6 | ''' 7 | 8 | import os 9 | import shutil 10 | import atexit 11 | from tempfile import mkstemp, mkdtemp 12 | 13 | from . import utils 14 | 15 | _FILES = [] 16 | _DIRS = [] 17 | 18 | 19 | def _exit_handler(): 20 | "Called at program exit" 21 | for f in _FILES: 22 | try: 23 | os.unlink(f) 24 | except OSError: 25 | pass 26 | for d in _DIRS: 27 | try: 28 | shutil.rmtree(d) 29 | except OSError: 30 | pass 31 | 32 | 33 | def _mkdir(directory): 34 | if not os.path.isdir(directory): 35 | try: 36 | os.makedirs(directory) 37 | except OSError as err: 38 | raise ValueError("Failed to create directory: %s" % (err)) 39 | 40 | 41 | def add(filename): 42 | ''' 43 | Remove the named file at program exit. 44 | ''' 45 | if len(_FILES) + len(_DIRS) == 0: 46 | atexit.register(_exit_handler) 47 | _FILES.append(filename) 48 | 49 | 50 | def create(directory=None, prefix='crmsh_'): 51 | ''' 52 | Create a temporary file and remove it at program exit. 53 | Returns (fd, filename) 54 | ''' 55 | if not directory: 56 | directory = utils.get_tempdir() 57 | _mkdir(directory) 58 | fd, fname = mkstemp(dir=directory, prefix=prefix) 59 | add(fname) 60 | return fd, fname 61 | 62 | 63 | def create_dir(directory=None, prefix='crmsh_'): 64 | ''' 65 | Create a temporary directory and remove it at program exit. 66 | ''' 67 | if not directory: 68 | directory = utils.get_tempdir() 69 | _mkdir(directory) 70 | ret = mkdtemp(dir=directory, prefix=prefix) 71 | if len(_FILES) + len(_DIRS) == 0: 72 | atexit.register(_exit_handler) 73 | _DIRS.append(ret) 74 | return ret 75 | --------------------------------------------------------------------------------
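A minimal usage sketch for the tmpfiles module above; the written bytes are illustrative:

import os

from crmsh import tmpfiles

fd, path = tmpfiles.create(prefix="crmsh_demo_")  # unlinked at program exit
os.write(fd, b"scratch data")
os.close(fd)
workdir = tmpfiles.create_dir()  # removed recursively at program exit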
/doc/.gitignore: -------------------------------------------------------------------------------- 1 | generated-sources/ 2 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all clean subdirs website website-clean 2 | 3 | all: crm.8.html generated-sources/crm.8.aio.adoc 4 | 5 | generated-sources: 6 | mkdir -p $@ 7 | 8 | generated-sources/Makefile: crm.8.adoc generated-sources 9 | adocxt gen-makefile < $< > $@ 10 | 11 | subdirs: generated-sources/Makefile 12 | $(MAKE) -C generated-sources all 13 | 14 | generated-sources/crm.8.adoc: crm.8.adoc subdirs 15 | adocxt gen-include < $< > $@ 16 | 17 | generated-sources/crm.8.aio.adoc: generated-sources/crm.8.adoc generated-sources/profiles.adoc 18 | adocaio $< > $@ 19 | 20 | generated-sources/profiles.adoc: profiles.adoc generated-sources 21 | cp $< $@ 22 | 23 | crm.8.html: generated-sources/crm.8.aio.adoc 24 | asciidoctor $< 25 | 26 | website: generated-sources/crm.8.adoc 27 | $(MAKE) -C website-v1 all 28 | 29 | website-clean: 30 | $(MAKE) -C website-v1 clean 31 | 32 | clean: website-clean 33 | $(RM) -r generated-sources crm.8.html 34 | -------------------------------------------------------------------------------- /doc/bootstrap-todo.md: -------------------------------------------------------------------------------- 1 | # Bootstrap TODO 2 | 3 | (inherited from the bootstrap project) 4 | 5 | ## Unclear Responsibility 6 | 7 | These may be in the purview of ha-cluster-bootstrap, or may be in the appliance image: 8 | 9 | * install debuginfo packages 10 | * enable coredumps 11 | 12 | 13 | ## General / Random 14 | 15 | * csync2_remote assumes there's only one group in csync2.cfg, or, more to 16 | the point, will only add new hosts to the first group. 17 | * Likewise, ssh_merge grabs all hosts regardless of what group they're in 18 | (although this is probably fine) 19 | * get rid of curses junk in log file (fix ENV term) 20 | * Multi-device SBD (use multiple -s args) 21 | * Start new node on standby 22 | # crm configure node node-1 attributes standby="on" 23 | # crm node clearstate node-1 (requires confirmation) 24 | - start new node, unbelievably it works! 25 | # crm node online node-1 26 | * don't error to log if log not started 27 | * is "partx -a " any sort of sane replacement for partprobe? 28 | * Use ssh-copy-id instead of manual fiddling with authorized_keys? 29 | 30 | 31 | ## STONITH Config 32 | 33 | * See https://bugzilla.novell.com/show_bug.cgi?id=722405 for stonith timeout suggestions 34 | 35 | 36 | ## Template Mode 37 | 38 | Generally specific to OCFS2 template ATM, as that's the only one extant. 39 | 40 | * Very long path to partition (/dev/disk/by-path/... for iSCSI) means we 41 | can't determine paths to new partitions, thanks to bnc#722959. Unclear 42 | if this will be fixed for SP2. 43 | * /dev/disk/by-id/dm-name paths are unreliable (at least for determining 44 | partitions after carving the device up). 45 | * Probably need to prompt user for new partitions after carving, should 46 | they not be found (FFS). 47 | * ocfs2 template is not the same as Hawk's. Consider enhancing 48 | ha-cluster-bootstrap so it uses Hawk's templates directly rather than using 49 | its own. 50 | * Ensure required RPMs are installed when running template (they're just 51 | Recommends in the spec) 52 | * Specifying sbd without ocfs2 partition may be incompatible with ocfs2 53 | template (need to test) 54 | * block device size " blockdev --getsz" etc. (when making OCFS2 partition 55 | with "-T vmstore") 56 | 57 | -------------------------------------------------------------------------------- /doc/crmsh_crm_report.8.adoc: -------------------------------------------------------------------------------- 1 | :man source: crmsh_crm_report 2 | :man version: 4.6.0 3 | :man manual: crmsh documentation 4 | 5 | crmsh_crm_report(8) 6 | ================== 7 | 8 | NAME 9 | ---- 10 | crmsh_crm_report - create report for CRM based clusters (Pacemaker) 11 | 12 | 13 | SEE ALSO 14 | -------- 15 | See "crm help report" or "crm report --help" 16 | -------------------------------------------------------------------------------- /doc/profiles.adoc: -------------------------------------------------------------------------------- 1 | === /etc/crm/profiles.yml 2 | 3 | ==== Purpose 4 | 5 | The YAML file `/etc/crm/profiles.yml` contains Corosync, SBD and Pacemaker parameters for different platforms. 6 | 7 | crmsh bootstrap detects the system environment and loads the corresponding parameters predefined in this file. 8 | 9 | ==== Syntax 10 | 11 | ............ 12 | profile_name: 13 | key_name: value 14 | ............ 15 | 16 | The valid profile names are: 17 | "microsoft-azure", "google-cloud-platform", "amazon-web-services", "s390", "default" 18 | 19 | `key_name` is a known Corosync, SBD, or Pacemaker parameter, like 20 | `corosync.totem.token` or `sbd.watchdog_timeout`. 21 | 22 | For more details about the parameter definitions, please refer to the man pages of corosync.conf(5) and sbd(8). 23 | 24 | Example 25 | ............ 26 | default: 27 | corosync.totem.crypto_hash: sha1 28 | corosync.totem.crypto_cipher: aes256 29 | corosync.totem.token: 5000 30 | corosync.totem.join: 60 31 | corosync.totem.max_messages: 20 32 | corosync.totem.token_retransmits_before_loss_const: 10 33 | sbd.watchdog_timeout: 15 34 | 35 | microsoft-azure: 36 | corosync.totem.token: 30000 37 | sbd.watchdog_timeout: 60 38 | ............ 39 | 40 | ==== How the content of the file is interpreted 41 | 42 | The profiles have the following properties: 43 | 44 | * Profiles are only loaded on the bootstrap init node. 45 | * The "default" profile is loaded in the beginning. 46 | * Specific profiles will override the corresponding values in the "default" profile (if the specific environment is detected). 47 | * Users can customize the "default" profile for their needs, for example, for on-premise environments that are not defined yet. 48 | --------------------------------------------------------------------------------
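A minimal sketch of the override semantics described above; the real loader lives in the crmsh bootstrap code, and the detected platform below is illustrative:

import yaml

with open("/etc/crm/profiles.yml") as f:
    profiles = yaml.safe_load(f)

params = dict(profiles.get("default", {}))           # "default" is loaded first
params.update(profiles.get("microsoft-azure", {}))   # detected profile overrides it
print(params["corosync.totem.token"])                # 30000 with the example above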
/doc/toolchain/Containerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/alpine:3.19.0 2 | 3 | RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories 4 | RUN apk add --no-cache \ 5 | py3-lxml \ 6 | py3-yaml \ 7 | py3-dateutil \ 8 | bash \ 9 | make \ 10 | asciidoctor \ 11 | asciidoc 12 | 13 | 14 | ENV PYTHONPATH=/opt/crmsh 15 | ENV PATH=/opt/crmsh/bin:/opt/crmsh/doc/toolchain/bin:"${PATH}" 16 | 17 | WORKDIR /opt/crmsh/doc 18 | CMD make 19 | -------------------------------------------------------------------------------- /doc/toolchain/bin/adocaio: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Preprocessor for include:: directive""" 3 | 4 | 5 | import os.path 6 | import re 7 | import sys 8 | 9 | 10 | RE_INCLUDE_DIRECTIVE = re.compile('^include::(.*)\\[]\\s*$') 11 | 12 | 13 | def all_in_one(path: str, out): 14 | dir = os.path.dirname(os.path.abspath(path)) 15 | with open(path, 'r', encoding='utf-8') as f: 16 | for line in f: 17 | found = RE_INCLUDE_DIRECTIVE.match(line) 18 | if not found: 19 | out.write(line) 20 | else: 21 | included_filepath = f'{dir}/{found.group(1)}' 22 | all_in_one(included_filepath, out) 23 | 24 | 25 | def main(): 26 | if len(sys.argv) != 2: 27 | print(f'usage: {sys.argv[0]} [infile]') 28 | sys.exit(1) 29 | all_in_one(sys.argv[1], sys.stdout) 30 | 31 | 32 | if __name__ == '__main__': 33 | main() 34 | -------------------------------------------------------------------------------- /doc/toolchain/bin/help2adoc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}") 4 | export PYTHONPATH=$(readlink -f "${SCRIPT_DIR}"/../lib) 5 | python3 -m help2adoc.main "$@" 6 | -------------------------------------------------------------------------------- /doc/toolchain/lib/help2adoc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/toolchain/lib/help2adoc/__init__.py -------------------------------------------------------------------------------- /doc/toolchain/lib/help2adoc/generator.py: -------------------------------------------------------------------------------- 1 | from .parser import Parser 2 | 3 | import typing 4 | import re 5 | 6 | 7 | class AsciiDocGenerator(Parser): 8 | USAGE_RE = re.compile('^usage:\\s*') 9 | 10 | def __init__(self,
output: typing.Callable[[str], None]): 11 | self.output = output 12 | 13 | def on_usage(self, text: str): 14 | usage = text[self.USAGE_RE.match(text).end():] 15 | self.output('Usage:\n\n ') 16 | self.output(usage) 17 | self.output('\n\n') 18 | 19 | def on_paragraph(self, text: str): 20 | self.output(self.escape(text)) 21 | self.output('\n\n') 22 | 23 | def enter_options(self): 24 | self.output('Options:\n\n') 25 | 26 | def exit_options(self): 27 | self.output('\n') 28 | 29 | def on_option(self, option: str, help: str): 30 | self.output('* `+++') 31 | self.output(option) 32 | self.output('+++`: ') 33 | self.output(self.escape(help)) 34 | self.output('\n\n') 35 | 36 | def enter_option_group(self, name: str): 37 | self.output(name) 38 | self.output(':\n\n') 39 | 40 | def exit_option_group(self, name: str): 41 | self.output('\n') 42 | 43 | def enter_description(self): 44 | pass 45 | 46 | def exit_description(self): 47 | pass 48 | 49 | def escape(self, text: str): 50 | # TODO 51 | return text 52 | -------------------------------------------------------------------------------- /doc/toolchain/lib/help2adoc/main.py: -------------------------------------------------------------------------------- 1 | from .parser import lexer, LookAheadIterator 2 | from .generator import AsciiDocGenerator 3 | 4 | from argparse import ArgumentParser 5 | import sys 6 | 7 | def main(): 8 | ap = ArgumentParser('help2adoc') 9 | ap.add_argument('file') 10 | args = ap.parse_args() 11 | with open(args.file, 'r') as f: 12 | tokens = LookAheadIterator(lexer(f)) 13 | AsciiDocGenerator(sys.stdout.write).parse_help(tokens) 14 | token = tokens.lookahead() 15 | if token is None: 16 | return 17 | epilog_start = token.lineno 18 | f.seek(0) 19 | for i in range(epilog_start): 20 | next(f) 21 | print('....') 22 | for line in f: 23 | print(line, end='') 24 | print('....') 25 | 26 | 27 | if __name__ == '__main__': 28 | main() 29 | -------------------------------------------------------------------------------- /doc/website-v1/.gitignore: -------------------------------------------------------------------------------- 1 | ./news.adoc 2 | -------------------------------------------------------------------------------- /doc/website-v1/404.adoc: -------------------------------------------------------------------------------- 1 | 404: Page not found 2 | =================== 3 | 4 | Apologies, but there is nothing here! 5 | 6 | The page you are looking for may have moved. 7 | 8 | * link:/documentation[Documentation] 9 | * link:/faq[Frequently Asked Questions] 10 | -------------------------------------------------------------------------------- /doc/website-v1/about.adoc: -------------------------------------------------------------------------------- 1 | = About = 2 | 3 | == Authors == 4 | 5 | include::../../AUTHORS[] 6 | 7 | == Site == 8 | 9 | This site was generated from http://asciidoc.org[AsciiDoc] sources. 10 | 11 | The CSS for this site started as a clone of the +bare+ theme by https://github.com/rtomayko/adoc-themes[Ryan Tomayko]. 12 | 13 | Fonts used are https://www.google.com/fonts/specimen/Open+Sans[Open Sans] and http://fontawesome.io[Font Awesome]. 14 | 15 | == License == 16 | 17 | `crmsh` is licensed under the GNU General Public License (GPL). 
18 | 19 | For more information, see https://gnu.org/licenses/gpl.html 20 | -------------------------------------------------------------------------------- /doc/website-v1/development.adoc: -------------------------------------------------------------------------------- 1 | = Development = 2 | 3 | == Tools == 4 | 5 | ++++ 6 | 13 | ++++ 14 | 15 | == Source Code == 16 | 17 | The source code for `crmsh` is kept in a 18 | http://git-scm.com/[git] repository 19 | hosted at https://github.com[github]. Use +git+ to get a working copy: 20 | 21 | ---- 22 | git clone https://github.com/ClusterLabs/crmsh.git 23 | ---- 24 | 25 | Dependencies 26 | ~~~~~~~~~~~~ 27 | 28 | Building and installing crmsh requires Python version 2.6 and up (but not 3, yet). 29 | 30 | Additionally, the following Python modules are needed: 31 | 32 | * `lxml` 33 | * `PyYAML` 34 | * `setuptools` 35 | * `parallax` 36 | * `python-dateutil` 37 | 38 | Building 39 | ~~~~~~~~ 40 | 41 | `crmsh` uses the autotools suite to manage the build process. 42 | 43 | ---- 44 | ./autogen.sh 45 | ./configure 46 | make 47 | make install 48 | ---- 49 | 50 | === Tests === 51 | 52 | The unit tests for `crmsh` require +nose+ to run. On most distributions, this can be installed 53 | by installing the package +python-nose+, or using +pip+. 54 | 55 | To run the unit test suite, go to the source code directory of `crmsh` 56 | and call: 57 | 58 | ---- 59 | ./test/run 60 | ---- 61 | 62 | `crmsh` also comes with a comprehensive regression test suite. The regression tests need 63 | to run after installation, on a system which has both crmsh and pacemaker installed. You 64 | will also need to install +pacemaker+ development headers. 65 | 66 | * link:https://github.com/ClusterLabs/pacemaker[pacemaker] 67 | 68 | To execute the tests, call: 69 | 70 | ---- 71 | /usr/share/crmsh/tests/regression.sh 72 | cat crmtestout/regression.out 73 | ---- 74 | -------------------------------------------------------------------------------- /doc/website-v1/documentation.adoc: -------------------------------------------------------------------------------- 1 | = Documentation = 2 | 3 | The main documentation for `crmsh` comes in the form of the 4 | `manual`, which is the same help as found using the `help` 5 | command in the interactive shell. 6 | 7 | Additionally, there are a couple of guides and other documents 8 | that will hopefully make using the shell as easy as possible. 9 | 10 | == Manual == 11 | 12 | * link:/man[Manual (Development)] 13 | * link:/man-4.6[Manual (v4.6.x)] 14 | * link:/man-4.3[Manual (v4.3.x)] 15 | * link:/man-3[Manual (v3.x)] 16 | * link:/man-2.0[Manual (v2.x)] 17 | * link:/man-1.2[Manual (v1.2.x)] 18 | 19 | == Guides == 20 | 21 | * link:/start-guide[Getting Started] 22 | * link:/history-guide[History Guide] 23 | * link:/rsctest-guide[Resource Testing Guide] 24 | * link:/configuration[Configuration] 25 | * link:/scripts[Cluster scripts] 26 | * link:/faq[Frequently Asked Questions] 27 | 28 | == Translations == 29 | 30 | * https://blog.3ware.co.jp/2015/05/crmsh-getting-started/[Getting Started (Japanese)] 31 | 32 | == External documentation == 33 | 34 | The SUSE 35 | https://www.suse.com/documentation/sle_ha/book_sleha/?page=/documentation/sle_ha/book_sleha/data/book_sleha.html[High 36 | Availability Guide] provides a guide to 37 | installing and configuring a complete cluster solution including both 38 | the `crm` shell and Hawk, the web GUI which uses the `crm` shell as 39 | its backend. 
40 | 41 | For more information on Pacemaker in general, see the 42 | http://clusterlabs.org/doc/[Pacemaker documentation] at `clusterlabs.org`. 43 | 44 | -------------------------------------------------------------------------------- /doc/website-v1/download.adoc: -------------------------------------------------------------------------------- 1 | = Download = 2 | 3 | The easiest way to install `crmsh` is via the package manager of your distribution. 4 | 5 | == SLES / openSUSE == 6 | 7 | `crmsh` is commercially supported on SLE via the https://www.suse.com/products/highavailability/[SUSE Linux Enterprise High Availability Extension]. It is also available for openSUSE with the package name `crmsh`. Development packages can be downloaded from the OBS: 8 | 9 | * https://build.opensuse.org/package/show/network:ha-clustering:Stable/crmsh[Stable version] 10 | * https://build.opensuse.org/package/show/network:ha-clustering:Factory/crmsh[Development version] 11 | 12 | == Red Hat / CentOS / Fedora == 13 | 14 | We try to build Red Hat / CentOS / Fedora-compatible RPM packages on the OBS (see above). 15 | 16 | === CentOS 7 === 17 | 18 | ---- 19 | dnf config-manager --add-repo http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/network:ha-clustering:Stable.repo 20 | ---- 21 | 22 | == Debian == 23 | 24 | The versions of `crmsh` and `pacemaker` currently available in the latest Debian release are quite old. Newer packages are available via the Debian-HA team https://wiki.debian.org/Debian-HA[wiki], and the distribution packages will hopefully be updated soon. 25 | 26 | == Ubuntu == 27 | 28 | Packages for `crmsh` are available from the https://launchpad.net/ubuntu/+source/crmsh[Launchpad]. 29 | 30 | == Gentoo == 31 | 32 | A fairly up-to-date version is available https://packages.gentoo.org/packages/sys-cluster/crmsh[here]. 33 | 34 | == Arch == 35 | 36 | `crmsh` is available via the https://aur.archlinux.org/packages/ha-pacemaker-crmsh/[AUR]. Unfortunately the package seems somewhat out of date. 37 | 38 | == Source Packages == 39 | 40 | Releases are available as `.tar.gz` or `.zip` archives via https://github.com/ClusterLabs/crmsh/releases[GitHub].
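Building from one of these source archives follows the same autotools steps described on the link:/development[Development] page; roughly:

----
./autogen.sh
./configure
make
make install
----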
41 | -------------------------------------------------------------------------------- /doc/website-v1/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /doc/website-v1/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /doc/website-v1/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /doc/website-v1/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /doc/website-v1/img/history-guide/sample-cluster.conf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/history-guide/sample-cluster.conf.png -------------------------------------------------------------------------------- /doc/website-v1/img/history-guide/smallapache-start.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/history-guide/smallapache-start.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/README: -------------------------------------------------------------------------------- 1 | Replaced the plain DocBook XSL admonition icons with Jimmac's DocBook 2 | icons (http://jimmac.musichall.cz/ikony.php3). I dropped transparency 3 | from the Jimmac icons to get round MS IE and FOP PNG incompatibilities.
4 | 5 | Stuart Rackham 6 | -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/1.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/10.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/11.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/12.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/13.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/14.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/15.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/2.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/3.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/4.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/5.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/5.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/6.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/7.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/8.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/callouts/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/callouts/9.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/caution.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/caution.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/example.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/home.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/important.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/important.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/next.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/next.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/note.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/note.png 
-------------------------------------------------------------------------------- /doc/website-v1/img/icons/prev.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/prev.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/tip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/tip.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/up.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/up.png -------------------------------------------------------------------------------- /doc/website-v1/img/icons/warning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/icons/warning.png -------------------------------------------------------------------------------- /doc/website-v1/img/laptop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/laptop.png -------------------------------------------------------------------------------- /doc/website-v1/img/loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/loader.gif -------------------------------------------------------------------------------- /doc/website-v1/img/servers.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/doc/website-v1/img/servers.gif -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/basic-transition.typescript: -------------------------------------------------------------------------------- 1 | crm(live)history# transition <1> 2 | INFO: running ptest with /var/cache/crm/history/live/sle12-c/pengine/pe-input-1907.bz2 3 | INFO: starting dotty to show transition graph <2> 4 | Current cluster status: <3> 5 | Online: [ sle12-a sle12-c ] 6 | s-libvirt (stonith:external/libvirt): Started sle12-c 7 | ... 8 | small-apache (ocf::heartbeat:apache): Stopped 9 | Transition Summary: 10 | * Start small-apache (sle12-a) 11 | Executing cluster transition: 12 | * Resource action: small-apache start on sle12-a 13 | Revised cluster status: 14 | Online: [ sle12-a sle12-c ] 15 | s-libvirt (stonith:external/libvirt): Started sle12-c 16 | ... 
17 | small-apache (ocf::heartbeat:apache): Started sle12-a 18 | 19 | Transition sle12-c:pe-input-1907 (20:30:14 - 20:30:15): <4> 20 | total 1 actions: 1 Complete 21 | Apr 15 20:30:14 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: start small-apache_start_0 on sle12-a 22 | Apr 15 20:30:14 sle12-a apache(small-apache)[1586]: INFO: AH00558: httpd2: Could not reliably determine the server's fully qualified domain name, using 10.2.12.51. Set the 'ServerName' directive globally to suppress this message 23 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/diff.typescript: -------------------------------------------------------------------------------- 1 | crm(live)history# diff -1 0 <1> 2 | --- -1 3 | +++ 0 4 | @@ -11 +11 @@ 5 | -primitive small-apache apache params configfile="/etc/apache2/small.conf" meta target-role=Stopped 6 | +primitive small-apache apache params configfile="/etc/apache2/small.conf" meta target-role=Started 7 | crm(live)history# diff -1 0 status <2> 8 | --- -1 9 | +++ 0 10 | @@ -15 +14,0 @@ 11 | - small-apache (ocf::heartbeat:apache): Started sle12-a 12 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/info.typescript: -------------------------------------------------------------------------------- 1 | # crm history 2 | crm(live)history# timeframe "Apr 15 20:25" "Apr 15 20:35" 3 | crm(live)history# info 4 | Source: live 5 | Created on: Thu Apr 16 11:32:36 CEST 2015 6 | By: report -Z -Q -f Wed Apr 15 20:25:00 2015 -t 2015-04-15 20:35:00 /var/cache/crm/history/live 7 | Period: 2015-04-15 20:25:00 - 2015-04-15 20:35:00 8 | Nodes: sle12-a sle12-c 9 | Groups: nfs-srv nfs-disk 10 | Resources: s-libvirt p_drbd_nfs nfs-vg fs1 virtual-ip nfs-server websrv websrv-ip small-apache 11 | Transitions: 1906 1907 12 | crm(live)history# peinputs v 13 | Date Start End Filename Client User Origin 14 | ==== ===== === ======== ====== ==== ====== 15 | 2015-04-15 20:29:59 20:30:01 pe-input-1906 no-client no-user no-origin 16 | 2015-04-15 20:30:14 20:30:15 pe-input-1907 no-client no-user no-origin 17 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/nfs-probe-err.typescript: -------------------------------------------------------------------------------- 1 | # crm history resource nfs-server 2 | INFO: fetching new logs, please wait ... 3 | Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14911]: <1> ERROR: NFS server is up, but the locking daemons are down 4 | Dec 16 11:53:23 sle12-c crmd[2823]: notice: te_rsc_command: Initiating action 54: stop nfs-server_stop_0 on sle12-a 5 | Dec 16 11:53:23 sle12-c crmd[2823]: notice: te_rsc_command: Initiating action 3: stop nfs-server_stop_0 on sle12-c (local) 6 | Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: Stopping NFS server ... 7 | Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: Stopping sm-notify 8 | Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: Stopping rpc.statd 9 | Dec 16 11:53:23 sle12-c nfsserver(nfs-server)[14944]: INFO: NFS server stopped 10 | Dec 16 11:53:23 sle12-c crmd[2823]: notice: te_rsc_command: Initiating action 55: start nfs-server_start_0 on sle12-a 11 | Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: Stopping NFS server ... 
12 | Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: Stopping sm-notify 13 | Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: Stopping rpc.statd 14 | Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23255]: INFO: NFS server stopped 15 | Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23320]: INFO: Starting NFS server ... 16 | Dec 16 11:53:23 sle12-a nfsserver(nfs-server)[23320]: INFO: Starting rpc.statd. 17 | Dec 16 11:53:24 sle12-a nfsserver(nfs-server)[23320]: INFO: executing sm-notify 18 | Dec 16 11:53:24 sle12-a nfsserver(nfs-server)[23320]: INFO: NFS server started 19 | Dec 16 11:53:24 sle12-a lrmd[6904]: <2> notice: operation_finished: nfs-server_start_0:23320:stderr [ id: rpcuser: no such user ] 20 | Dec 16 11:53:24 sle12-a lrmd[6904]: message repeated 3 times: [ notice: operation_finished: nfs-server_start_0:23320:stderr [ id: rpcuser: no such user ]] 21 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/resource-trace.typescript: -------------------------------------------------------------------------------- 1 | # crm resource trace nfs-server monitor 0 2 | INFO: Trace for nfs-server:monitor is written to /var/lib/heartbeat/trace_ra/ 3 | INFO: Trace set, restart nfs-server to trace non-monitor operations 4 | # crm resource cleanup nfs-server 5 | Cleaning up nfs-server on sle12-a 6 | Cleaning up nfs-server on sle12-c 7 | Waiting for 2 replies from the CRMd.. OK 8 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/resource.typescript: -------------------------------------------------------------------------------- 1 | crm(live)history# resource small-apache 2 | Apr 15 20:29:59 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: stop small-apache_stop_0 on sle12-a 3 | Apr 15 20:29:59 sle12-a apache(small-apache)[1366]: INFO: Attempting graceful stop of apache PID 9155 4 | Apr 15 20:30:01 sle12-a apache(small-apache)[1366]: INFO: apache stopped. 5 | Apr 15 20:30:14 sle12-a apache(small-apache)[1586]: INFO: AH00558: httpd2: Could not reliably determine the server's fully qualified domain name, using 10.2.12.51. 
Set the 'ServerName' directive globally to suppress this message 6 | Apr 15 20:30:14 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: start small-apache_start_0 on sle12-a 7 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/sample-cluster.conf.crm: -------------------------------------------------------------------------------- 1 | node 167906357: sle12-c 2 | node 167906355: sle12-a 3 | primitive s-libvirt stonith:external/libvirt \ 4 | params hostlist="sle12-a sle12-c" hypervisor_uri="qemu+ssh://hex-10.suse.de/system?keyfile=/root/.ssh/xen" reset_method=reboot \ 5 | op monitor interval=5m timeout=60s 6 | primitive p_drbd_nfs ocf:linbit:drbd \ 7 | params drbd_resource=nfs \ 8 | op monitor interval=15 role=Master \ 9 | op monitor interval=30 role=Slave \ 10 | op start interval=0 timeout=300 \ 11 | op stop interval=0 timeout=120 12 | primitive nfs-vg LVM \ 13 | params volgrpname=nfs-vg 14 | primitive fs1 Filesystem \ 15 | params device="/dev/nfs-vg/fs1" directory="/srv/nfs" fstype=ext3 \ 16 | op monitor interval=30s 17 | primitive virtual-ip IPaddr2 \ 18 | params ip=10.2.12.100 19 | primitive nfs-server nfsserver \ 20 | params nfs_shared_infodir="/srv/nfs/state" nfs_ip=10.2.12.100 \ 21 | op monitor interval=30s 22 | primitive websrv apache \ 23 | params configfile="/etc/apache2/httpd.conf" \ 24 | op monitor interval=30 25 | primitive websrv-ip IPaddr2 \ 26 | params ip=10.2.12.101 27 | primitive small-apache apache \ 28 | params configfile="/etc/apache2/small.conf" 29 | group nfs-disk nfs-vg fs1 30 | group nfs-srv virtual-ip nfs-server 31 | ms ms_drbd_nfs p_drbd_nfs \ 32 | meta notify=true clone-max=2 33 | location nfs-pref virtual-ip 100: sle12-a 34 | location websrv-pref websrv 100: sle12-c 35 | colocation vg-with-drbd inf: nfs-vg ms_drbd_nfs:Master 36 | colocation c-nfs inf: nfs-srv nfs-disk 37 | colocation c-websrv inf: websrv websrv-ip 38 | colocation small-apache-with-virtual-ip inf: small-apache virtual-ip 39 | # need fs1 for the NFS server 40 | order o-nfs inf: nfs-disk nfs-srv 41 | # websrv serves requests at IP websrv-ip 42 | order o-websrv inf: websrv-ip websrv 43 | # small apache serves requests at IP virtual-ip 44 | order virtual-ip-before-small-apache inf: virtual-ip small-apache 45 | # drbd device is the nfs-vg PV 46 | order drbd-before-nfs-vg inf: ms_drbd_nfs:promote nfs-vg:start 47 | property cib-bootstrap-options: \ 48 | dc-version=1.1.12-ad083a8 \ 49 | cluster-infrastructure=corosync \ 50 | cluster-name=sle12-test3l-public \ 51 | no-quorum-policy=ignore \ 52 | last-lrm-refresh=1429192263 53 | op_defaults op-options: \ 54 | timeout=120s 55 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/status-probe-fail.typescript: -------------------------------------------------------------------------------- 1 | # crm status 2 | Last updated: Tue Dec 16 11:57:04 2014 3 | Last change: Tue Dec 16 11:53:22 2014 4 | Stack: corosync 5 | Current DC: sle12-c (167906357) - partition with quorum 6 | Version: 1.1.12-ad083a8 7 | 2 Nodes configured 8 | 10 Resources configured 9 | Online: [ sle12-a sle12-c ] 10 | [...] 11 | nfs-server (ocf::heartbeat:nfsserver): Started sle12-a 12 | [...] 
13 | Failed actions: 14 | nfs-server_monitor_0 on sle12-c 'unknown error' (1): call=298, status=complete, 15 | last-rc-change='Tue Dec 16 11:53:23 2014', queued=0ms, exec=135ms 16 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/stonith-corosync-stopped.typescript: -------------------------------------------------------------------------------- 1 | # crm history node sle12-c 2 | INFO: fetching new logs, please wait ... 3 | Dec 19 14:36:18 sle12-c corosync[29551]: [MAIN ] Corosync Cluster Engine ('2.3.3'): started and ready to provide service. 4 | Dec 19 14:36:19 sle12-c corosync[29545]: Starting Corosync Cluster Engine (corosync): [ OK ] 5 | Dec 19 14:36:20 sle12-a pengine[6906]: warning: pe_fence_node: Node sle12-c will be fenced because our peer process is no longer available 6 | Dec 19 14:36:20 sle12-a pengine[6906]: warning: stage6: Scheduling Node sle12-c for STONITH 7 | Dec 19 14:36:20 sle12-a crmd[6907]: notice: te_fence_node: Executing reboot fencing operation (65) on sle12-c (timeout=60000) 8 | Dec 19 14:36:20 sle12-a crmd[6907]: notice: peer_update_callback: Node return implies stonith of sle12-c (action 65) completed 9 | -------------------------------------------------------------------------------- /doc/website-v1/include/history-guide/transition-log.typescript: -------------------------------------------------------------------------------- 1 | crm(live)history# transition log 2 | INFO: retrieving information from cluster nodes, please wait ... 3 | Apr 15 20:30:14 sle12-c crmd[1136]: notice: do_state_transition: State transition S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph ] 4 | Apr 15 20:30:14 sle12-c stonithd[1132]: notice: unpack_config: On loss of CCM Quorum: Ignore 5 | Apr 15 20:30:14 sle12-c pengine[1135]: notice: unpack_config: On loss of CCM Quorum: Ignore 6 | Apr 15 20:30:14 sle12-c pengine[1135]: notice: LogActions: Start small-apache#011(sle12-a) 7 | Apr 15 20:30:14 sle12-c crmd[1136]: notice: do_te_invoke: Processing graph 123 (ref=pe_calc-dc-1429122614-234) derived from /var/lib/pacemaker/pengine/pe-input-1907.bz2 8 | Apr 15 20:30:14 sle12-c crmd[1136]: notice: te_rsc_command: Initiating action 60: start small-apache_start_0 on sle12-a 9 | Apr 15 20:30:14 sle12-c pengine[1135]: notice: process_pe_message: Calculated Transition 123: /var/lib/pacemaker/pengine/pe-input-1907.bz2 10 | Apr 15 20:30:14 sle12-a stonithd[1160]: notice: unpack_config: On loss of CCM Quorum: Ignore 11 | Apr 15 20:30:14 sle12-a apache(small-apache)[1586]: INFO: AH00558: httpd2: Could not reliably determine the server's fully qualified domain name, using 10.2.12.51. Set the 'ServerName' directive globally to suppress this message 12 | Apr 15 20:30:14 sle12-a crmd[1164]: notice: process_lrm_event: Operation small-apache_start_0: ok (node=sle12-a, call=69, rc=0, cib-update=48, confirmed=true) 13 | Apr 15 20:30:15 sle12-c crmd[1136]: notice: run_graph: Transition 123 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pacemaker/pengine/pe-input-1907.bz2): Complete 14 | -------------------------------------------------------------------------------- /doc/website-v1/index.adoc: -------------------------------------------------------------------------------- 1 | The CRM Shell 2 | ============= 3 | 4 | ++++ 5 |
6 | 7 | 8 | 9 |
10 | ++++ 11 | 12 | *`crmsh` is a cluster management shell* for the Pacemaker High Availability stack. 13 | 14 | Configure, manage and troubleshoot clusters from the command line, 15 | with full tab completion and extensive help. `crmsh` also provides 16 | advanced features like low-level cluster configuration, cluster scripting, 17 | package management, and history exploration tools, giving you a complete 18 | insight into the state of your cluster. 19 | 20 | * https://github.com/ClusterLabs/crmsh/[Source Code] 21 | * link:man-4.6/[Reference Manual (v4.6)] 22 | * link:man-4.3/[Reference Manual (v4.3.1)] 23 | * link:man-3/[Reference Manual (v3.0.0)] 24 | * link:man-2.0/[Reference Manual (v2.3.2)] 25 | * https://build.opensuse.org/package/show/network:ha-clustering:Stable/crmsh[Packages] 26 | * http://clusterlabs.org[Cluster Labs] 27 | -------------------------------------------------------------------------------- /doc/website-v1/installation.adoc: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | See link:/download[Download]. 5 | -------------------------------------------------------------------------------- /doc/website-v1/news/2015-05-25-getting-started-jp.adoc: -------------------------------------------------------------------------------- 1 | Getting Started translated to Japanese 2 | ====================================== 3 | :Author: Kristoffer Gronlund 4 | :Email: kgronlund@suse.com 5 | :Date: 2015-05-25 13:30 6 | 7 | Many thanks to Motoharu Kubo at 3ware for offering to translate the 8 | `crmsh` documentation to Japanese! 9 | 10 | The first document to be translated is the link:/start-guide/[Getting Started] guide, 11 | now available in Japanese at the following location: 12 | 13 | * https://blog.3ware.co.jp/2015/05/crmsh-getting-started/ 14 | 15 | Thank you, 16 | Kristoffer and Dejan 17 | 18 | -------------------------------------------------------------------------------- /doc/website-v1/news/2016-09-01-release-2_1_7.adoc: -------------------------------------------------------------------------------- 1 | Announcing crmsh stable release 2.1.7 2 | ===================================== 3 | :Author: Kristoffer Gronlund 4 | :Email: kgronlund@suse.com 5 | :Date: 2016-09-01 09:00 6 | 7 | Today I am proud to announce the release of `crmsh` version 2.1.7! 8 | The major new thing in this release is a backport of the event-based 9 | alerts support from the 2.3 branch. 10 | 11 | Big thanks to Hideo Yamauchi for his patience and testing of the 12 | alerts backport. 13 | 14 | This time, the list of changes is small enough that I can add it right 15 | here: 16 | 17 | - high: parse: Backport of event-driven alerts parser (#150) 18 | - high: hb_report: Don't collect logs from journalctl if -M is set (bsc#990025) 19 | - high: hb_report: Skip lines without timestamps in log correctly (bsc#989810) 20 | - high: constants: Add maintenance to set of known attributes (bsc#981659) 21 | - high: utils: Avoid deadlock if DC changes during idle wait (bsc#978480) 22 | - medium: scripts: no-quorum-policy=ignore is deprecated (bsc#981056) 23 | - low: cibconfig: Don't mix up CLI name with XML tag 24 | 25 | You can also get the list of changes from the changelog: 26 | 27 | * https://github.com/ClusterLabs/crmsh/blob/2.1.7/ChangeLog 28 | 29 | Right now, I don't have a set of pre-built rpm packages for Linux 30 | distributions ready, but I am going to make this available soon.
This 31 | is in particular for CentOS 6.x, which still relies on Python 2.6 32 | support, making the later releases more difficult to run 33 | there. These packages will most likely appear as a subrepository 34 | here (more details coming soon): 35 | 36 | * http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/ 37 | 38 | Archives of the tagged release: 39 | 40 | * https://github.com/ClusterLabs/crmsh/archive/2.1.7.tar.gz 41 | * https://github.com/ClusterLabs/crmsh/archive/2.1.7.zip 42 | 43 | 44 | Thank you, 45 | 46 | Kristoffer 47 | -------------------------------------------------------------------------------- /doc/website-v1/news/2016-09-02-release-2_3_1.adoc: -------------------------------------------------------------------------------- 1 | Releasing crmsh version 2.3.1 2 | ============================= 3 | :Author: Kristoffer Gronlund 4 | :Email: kgronlund@suse.com 5 | :Date: 2016-09-02 10:00 6 | 7 | Hello everyone! 8 | 9 | Today I am releasing crmsh version 2.3.1. The only change this time is 10 | to lower the Python version requirement from 2.7 to 2.6. This is so 11 | that crmsh remains compatible with CentOS 6, where there is no 12 | standardized Python 2.7 version available. For users of other 13 | distributions where Python 2.7 is available, there are no other 14 | changes in this release and no need to upgrade. 15 | 16 | The source code can be downloaded from GitHub: 17 | 18 | * https://github.com/ClusterLabs/crmsh/releases/tag/2.3.1 19 | 20 | Packages for several popular Linux distributions can be downloaded 21 | from the Stable repository at the OBS: 22 | 23 | * http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/ 24 | 25 | Archives of the tagged release: 26 | 27 | * https://github.com/ClusterLabs/crmsh/archive/2.3.1.tar.gz 28 | * https://github.com/ClusterLabs/crmsh/archive/2.3.1.zip 29 | 30 | As usual, a huge thank you to all contributors and users of crmsh! 31 | 32 | Cheers, 33 | Kristoffer 34 | -------------------------------------------------------------------------------- /doc/website-v1/news/2016-09-05-release-2_2_2.adoc: -------------------------------------------------------------------------------- 1 | Releasing crmsh version 2.2.2 2 | ============================= 3 | :Author: Kristoffer Gronlund 4 | :Email: kgronlund@suse.com 5 | :Date: 2016-09-05 19:00 6 | 7 | Hello everyone! 8 | 9 | Today I am releasing crmsh version 2.2.2. The biggest change in this 10 | release is the backport of the support for event-based alerts from the 11 | 2.3 branch.
The full list of changes follows below: 12 | 13 | - high: parse: Backport of event-driven alerts parser (#150) 14 | - high: hb_report: Don't collect logs from journalctl if -M is set (bsc#990025) 15 | - high: hb_report: Skip lines without timestamps in log correctly (bsc#989810) 16 | - high: constants: Add maintenance to set of known attributes (bsc#981659) 17 | - high: utils: Avoid deadlock if DC changes during idle wait (bsc#978480) 18 | - medium: scripts: no-quorum-policy=ignore is deprecated (bsc#981056) 19 | - medium: tmpfiles: Create temporary directory if non-existing (bsc#981583) 20 | - medium: xmlutil: reduce unknown attribute to warning (bsc#981659) 21 | - medium: ui_resource: Add force argument to resource cleanup (bsc#979420) 22 | - parse: Use original _TARGET_RE 23 | 24 | The source code can be downloaded from GitHub: 25 | 26 | * https://github.com/ClusterLabs/crmsh/releases/tag/2.2.2 27 | 28 | Archives of the tagged release: 29 | 30 | * https://github.com/ClusterLabs/crmsh/archive/2.2.2.tar.gz 31 | * https://github.com/ClusterLabs/crmsh/archive/2.2.2.zip 32 | 33 | As usual, a huge thank you to all contributors and users of crmsh! 34 | 35 | Cheers, 36 | Kristoffer 37 | -------------------------------------------------------------------------------- /doc/website-v1/news/2017-01-31-release-3_0_0.adoc: -------------------------------------------------------------------------------- 1 | Releasing crmsh version 3.0.0 2 | ============================= 3 | :Author: Kristoffer Gronlund 4 | :Email: kgronlund@suse.com 5 | :Date: 2017-01-31 10:00 6 | 7 | Hello everyone! 8 | 9 | I'm happy to announce the release of crmsh version 3.0.0 today. The 10 | main reason for the major version bump is that I have merged the 11 | sleha-bootstrap project with crmsh, replacing the cluster 12 | init/add/remove commands with the corresponding commands from 13 | sleha-bootstrap. 14 | 15 | At the moment, these commands are highly specific to SLE and openSUSE, 16 | unfortunately. I am working on making them as distribution agnostic as 17 | possible, but would appreciate help from users of other distributions 18 | in making them work as well on those platforms as they do on 19 | SLE/openSUSE. 20 | 21 | Briefly, the "cluster init" command configures a complete cluster from 22 | scratch, including optional configuration of fencing via SBD, shared 23 | storage using OCFS2, setting up the Hawk web interface etc. 24 | 25 | There are some other changes in this release as well; see the 26 | ChangeLog for the complete list of changes: 27 | 28 | * https://github.com/ClusterLabs/crmsh/blob/3.0.0/ChangeLog 29 | 30 | The source code can be downloaded from GitHub: 31 | 32 | * https://github.com/ClusterLabs/crmsh/releases/tag/3.0.0 33 | 34 | This version of crmsh will be available in openSUSE Tumbleweed as soon 35 | as possible, and packages for several popular Linux distributions are 36 | available from the Stable repository at the OBS: 37 | 38 | * http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/ 39 | 40 | Archives of the tagged release: 41 | 42 | * https://github.com/ClusterLabs/crmsh/archive/3.0.0.tar.gz 43 | * https://github.com/ClusterLabs/crmsh/archive/3.0.0.zip 44 | 45 | As usual, a huge thank you to all contributors and users of crmsh!
46 | 47 | Cheers, 48 | Kristoffer 49 | -------------------------------------------------------------------------------- /doc/website-v1/news/2021-06-17-release-4_3_1.adoc: -------------------------------------------------------------------------------- 1 | Releasing crmsh version 4.3.1 2 | ============================= 3 | :Author: Xin Liang 4 | :Email: XLiang@suse.com 5 | :Date: 2021-06-17 11:00 6 | 7 | Hello everyone! 8 | 9 | I'm happy to announce the release of crmsh version 4.3.1. 10 | 11 | Major changes since 4.3.0: 12 | 13 | Features: 14 | 15 | * Add "crm cluster crash_test" for cluster failure simulation (#825) 16 | 17 | * Add ocfs2.OCFS2Manager to manage ocfs2 stage process with cluster lvm2 (#798) 18 | 19 | * Support setup SBD via bootstrap "sbd" stage on an existing cluster (#744) 20 | 21 | * Enable configuring qdevice in interactive mode (#765) 22 | 23 | Fixes: 24 | 25 | * Adjust sbd watchdog timeout when using diskless SBD with qdevice (#818) 26 | 27 | * Not allow property setting with an empty value (#817) 28 | 29 | * Keep "help <sub-command>" and "<sub-command> -h" consistent for those using argparse (#644) 30 | 31 | * Sync corosync.conf before finished joining (#775) 32 | 33 | * Adjust qdevice configure/remove process to avoid race condition due to quorum lost (#741) 34 | 35 | * Walk through hb_report process under hacluster (#742) 36 | 37 | There are some other changes in this release as well; see the 38 | ChangeLog for the complete list of changes: 39 | 40 | * https://github.com/ClusterLabs/crmsh/blob/master/ChangeLog 41 | 42 | The source code can be downloaded from GitHub: 43 | 44 | * https://github.com/ClusterLabs/crmsh/releases/tag/4.3.1 45 | 46 | Development packages for openSUSE Tumbleweed 47 | are available from the Open Build System, here: 48 | 49 | * https://build.opensuse.org/package/show/network:ha-clustering:Factory/crmsh 50 | 51 | As usual, a huge thank you to all contributors and users of crmsh! 52 | 53 | 54 | Regards, 55 | xin 56 | -------------------------------------------------------------------------------- /etc/profiles.yml: -------------------------------------------------------------------------------- 1 | # The valid profile names are: 2 | # "microsoft-azure", "google-cloud-platform", "amazon-web-services", "s390", "default" 3 | # 4 | # The "default" profile is loaded first. 5 | # 6 | # Each specific profile will override the corresponding values in the "default" 7 | # profile if its environment is detected. 8 | # 9 | # Users can customize the "default" profile for their needs, for example 10 | # for on-premise environments that are not defined yet. 11 | # 12 | # Profiles are only loaded on the bootstrap init node.
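#
# A conceptual illustration (a sketch, not crmsh's actual code): the effective
# settings are the detected platform's profile merged over "default", roughly
# a Python-style dict merge:
#
#   effective = {**profiles["default"], **profiles["microsoft-azure"]}
#
# so on Azure, corosync.totem.token becomes 30000 and sbd.watchdog_timeout
# becomes 60, while all other "default" values below are kept.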
13 | # 14 | # For more details, see man corosync.conf and man sbd 15 | 16 | default: 17 | corosync.totem.token: 5000 18 | corosync.totem.join: 60 19 | corosync.totem.max_messages: 20 20 | corosync.totem.token_retransmits_before_loss_const: 10 21 | # sbd.msgwait is set to sbd.watchdog_timeout*2 by crmsh 22 | # or, you can define your own value in profiles.yml 23 | sbd.watchdog_timeout: 15 24 | 25 | knet-default: 26 | corosync.totem.crypto_hash: sha256 27 | corosync.totem.crypto_cipher: aes256 28 | 29 | microsoft-azure: 30 | corosync.totem.token: 30000 31 | sbd.watchdog_timeout: 60 32 | 33 | amazon-web-services: 34 | corosync.totem.token: 30000 35 | 36 | google-cloud-platform: 37 | corosync.totem.token: 20000 38 | -------------------------------------------------------------------------------- /high-availability.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | SUSE High Availability Cluster ports 4 | This allows you to open various ports related to SUSE High Availability Cluster. Ports are opened for pacemaker-remote, qnetd, corosync, hawk2, booth, dlm, csync2, fence_kdump_send and drbd. 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /pylint.toml: -------------------------------------------------------------------------------- 1 | [tool.pylint.main] 2 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 3 | # number of processors available to use, and will cap the count on Windows to 4 | # avoid hangs. 5 | jobs = 0 6 | 7 | [tool.pylint."messages control"] 8 | # Disable the message, report, category or checker with the given id(s). You can 9 | # either give multiple identifiers separated by comma (,) or put this option 10 | # multiple times (only on the command line, not in the configuration file where 11 | # it should appear only once). You can also use "--disable=all" to disable 12 | # everything first and then re-enable specific checks. For example, if you want 13 | # to run only the similarities checker, you can use "--disable=all 14 | # --enable=similarities". If you want to run only the classes checker, but have 15 | # no Warning level messages displayed, use "--disable=all --enable=classes 16 | # --disable=W". 17 | disable = ["all"] 18 | 19 | # Enable the message, report, category or checker with the given id(s). You can 20 | # either give multiple identifiers separated by comma (,) or put this option 21 | # multiple times (only on the command line, not in the configuration file where it 22 | # should appear only once). See also the "--disable" option for examples.
23 | enable = ["string"] 24 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | python_files = test_*.py 3 | testpaths = crmsh 4 | norecursedirs = 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | lxml 2 | PyYAML 3 | python-dateutil 4 | packaging 5 | -------------------------------------------------------------------------------- /scripts/check-uptime/fetch.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import crm_script 3 | try: 4 | uptime = open('/proc/uptime').read().split()[0] 5 | crm_script.exit_ok(uptime) 6 | except Exception as e: 7 | crm_script.exit_fail("Couldn't open /proc/uptime: %s" % (e)) 8 | -------------------------------------------------------------------------------- /scripts/check-uptime/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Script 3 | shortdesc: Check uptime of nodes 4 | longdesc: > 5 | Fetches the uptime of all nodes and reports which 6 | node has lived longest. 7 | 8 | parameters: 9 | - name: show_all 10 | shortdesc: Show all uptimes 11 | type: boolean 12 | value: false 13 | 14 | actions: 15 | - shortdesc: Fetch uptimes 16 | collect: fetch.py 17 | 18 | - shortdesc: Report uptime 19 | report: report.py 20 | -------------------------------------------------------------------------------- /scripts/check-uptime/report.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import crm_script 3 | show_all = crm_script.is_true(crm_script.param('show_all')) 4 | uptimes = list(crm_script.output(1).items()) 5 | max_uptime = '', 0.0 6 | for host, uptime in uptimes: 7 | if float(uptime) > max_uptime[1]: 8 | max_uptime = host, float(uptime) 9 | if show_all: 10 | print("Uptimes: %s" % (', '.join("%s: %s" % v for v in uptimes))) 11 | print("Longest uptime is %s seconds on host %s" % (max_uptime[1], max_uptime[0])) 12 | -------------------------------------------------------------------------------- /scripts/database/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Database 3 | shortdesc: MySQL/MariaDB Database 4 | longdesc: > 5 | Configure a MySQL or MariaDB SQL Database. 6 | Enable the install option to install the necessary 7 | packages for the database. 
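# Usage sketch (assuming crmsh's generic script runner, which also drives
# the other wizards in this directory; "install" is the parameter defined
# below):
#
#   crm script verify database
#   crm script run database install=true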
8 | include: 9 | - agent: ocf:heartbeat:mysql 10 | name: database 11 | parameters: 12 | - name: test_table 13 | value: "" 14 | ops: | 15 | op start timeout=120s 16 | op stop timeout=120s 17 | op monitor interval=20s timeout=30s 18 | 19 | parameters: 20 | - name: install 21 | shortdesc: Enable to install required packages 22 | type: boolean 23 | value: false 24 | 25 | actions: 26 | - install: mariadb 27 | shortdesc: Install packages 28 | when: install 29 | - service: 30 | - name: mysql 31 | action: disable 32 | shortdesc: Let cluster manage the database 33 | when: install 34 | - include: database 35 | -------------------------------------------------------------------------------- /scripts/db2-hadr/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Database 3 | shortdesc: IBM DB2 Database with HADR 4 | longdesc: >- 5 | Configure an IBM DB2 database resource as active/passive HADR, 6 | along with a Virtual IP. 7 | 8 | include: 9 | - agent: ocf:heartbeat:db2 10 | parameters: 11 | - name: id 12 | required: true 13 | shortdesc: DB2 Resource ID 14 | longdesc: Unique ID for the database resource in the cluster. 15 | type: string 16 | value: db2-database 17 | - name: instance 18 | required: true 19 | type: string 20 | value: db2inst1 21 | - name: dblist 22 | value: db1 23 | ops: | 24 | op start interval="0" timeout="130" 25 | op stop interval="0" timeout="120" 26 | op promote interval="0" timeout="120" 27 | op demote interval="0" timeout="120" 28 | op monitor interval="30" timeout="60" 29 | op monitor interval="45" role="Master" timeout="60" 30 | 31 | - script: virtual-ip 32 | shortdesc: The IP address configured here will start before the DB2 instance. 33 | parameters: 34 | - name: id 35 | value: db2-virtual-ip 36 | actions: 37 | - include: virtual-ip 38 | - include: db2 39 | - cib: | 40 | clone promotable-{{db2:id}} {{db2:id}} 41 | meta target-role=Stopped notify=true promotable=true 42 | colocation {{virtual-ip:id}}-with-master inf: {{virtual-ip:id}}:Started promotable-{{db2:id}}:Master 43 | order {{virtual-ip:id}}-after-master Mandatory: promotable-{{db2:id}}:promote {{virtual-ip:id}}:start 44 | -------------------------------------------------------------------------------- /scripts/db2/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Database 3 | shortdesc: IBM DB2 Database 4 | longdesc: >- 5 | Configure an IBM DB2 database resource, along with a Virtual IP and a file system mount point. 6 | 7 | Note that the file system resource will be stopped initially, in case you need to run mkfs. 8 | 9 | include: 10 | - agent: ocf:heartbeat:db2 11 | parameters: 12 | - name: id 13 | required: true 14 | shortdesc: DB2 Resource ID 15 | longdesc: Unique ID for the database resource in the cluster. 16 | type: string 17 | value: db2-database 18 | - name: instance 19 | required: true 20 | type: string 21 | value: db2inst1 22 | - script: virtual-ip 23 | shortdesc: The IP address configured here will start before the DB2 instance. 24 | parameters: 25 | - name: id 26 | value: db2-virtual-ip 27 | - script: filesystem 28 | shortdesc: The file system configured here will be mounted before the DB2 instance.
29 | parameters: 30 | - name: id 31 | value: db2-fs 32 | - name: fstype 33 | value: xfs 34 | - name: directory 35 | value: "/db2/db2inst1" 36 | actions: 37 | - include: virtual-ip 38 | - include: filesystem 39 | - include: db2 40 | - cib: | 41 | group g-{{id}} 42 | {{virtual-ip:id}} 43 | {{filesystem:id}} 44 | {{id}} 45 | meta target-role=Stopped 46 | -------------------------------------------------------------------------------- /scripts/drbd/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: File System 3 | shortdesc: DRBD Block Device 4 | longdesc: >- 5 | Distributed Replicated Block Device. Configure a DRBD cluster resource. 6 | 7 | Also creates a multistate resource managing the state of DRBD. 8 | 9 | Does not create or modify the referenced DRBD configuration. 10 | 11 | parameters: 12 | - name: id 13 | shortdesc: DRBD Cluster Resource ID 14 | required: true 15 | value: drbd-data 16 | type: resource 17 | - name: drbd_resource 18 | shortdesc: DRBD Resource Name 19 | required: true 20 | value: drbd0 21 | type: string 22 | - name: drbdconf 23 | value: "/etc/drbd.conf" 24 | - name: install 25 | type: boolean 26 | shortdesc: Install packages for DRBD 27 | value: false 28 | 29 | actions: 30 | - install: drbd drbd-kmp-default 31 | shortdesc: Install packages for DRBD 32 | when: install 33 | - cib: | 34 | primitive {{id}} ocf:linbit:drbd 35 | params 36 | drbd_resource="{{drbd_resource}}" 37 | drbdconf="{{drbdconf}}" 38 | op monitor interval="29s" role="Master" 39 | op monitor interval="31s" role="Slave" 40 | clone promotable-{{id}} {{id}} 41 | meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true promotable=true 42 | -------------------------------------------------------------------------------- /scripts/exportfs/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | shortdesc: "NFS Exported File System" 3 | category: NFS 4 | include: 5 | - agent: ocf:heartbeat:exportfs 6 | parameters: 7 | - name: id 8 | required: true 9 | shortdesc: Resource ID 10 | longdesc: Cluster Resource ID 11 | type: resource 12 | value: exportfs 13 | - name: fsid 14 | shortdesc: Unique FSID Within Cluster or Starting FSID for Multiple Exports 15 | required: true 16 | type: integer 17 | value: 1 18 | - name: directory 19 | required: true 20 | type: string 21 | shortdesc: Mount Point (Directory) 22 | longdesc: "The mount point for the file system, e.g.: /srv/nfs/home" 23 | - name: options 24 | required: true 25 | shortdesc: Mount Options 26 | longdesc: "Any additional options to be given to the mount command, for example rw,mountpoint" 27 | type: string 28 | - name: wait_for_leasetime_on_stop 29 | required: false 30 | shortdesc: Wait for Lease Time on Stop 31 | longdesc: If set to true, wait for lease on stop. 
32 | type: boolean 33 | value: true 34 | ops: | 35 | op monitor interval=30s 36 | actions: 37 | - include: exportfs 38 | -------------------------------------------------------------------------------- /scripts/filesystem/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: File System 3 | shortdesc: File System (mount point) 4 | include: 5 | - agent: ocf:heartbeat:Filesystem 6 | name: filesystem 7 | parameters: 8 | - name: id 9 | required: true 10 | type: resource 11 | - name: device 12 | required: true 13 | type: string 14 | - name: directory 15 | required: true 16 | type: string 17 | - name: fstype 18 | required: true 19 | type: string 20 | - name: options 21 | required: false 22 | type: string 23 | ops: | 24 | meta target-role=Stopped 25 | op start timeout=60s 26 | op stop timeout=60s 27 | op monitor interval=20s timeout=40s 28 | 29 | actions: 30 | - include: filesystem 31 | -------------------------------------------------------------------------------- /scripts/gfs2-base/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2009 Andrew Beekhof 2 | # Copyright (C) 2015 Kristoffer Gronlund 3 | # 4 | # License: GNU General Public License (GPL) 5 | version: 2.2 6 | category: Script 7 | shortdesc: GFS2 File System Base (Cloned) 8 | longdesc: | 9 | This template generates a cloned instance of the GFS2 file system. 10 | The file system should be on the device, unless cLVM is used. 11 | 12 | parameters: 13 | - name: clvm-group 14 | shortdesc: cLVM Resource Group ID 15 | longdesc: Optional ID of a cLVM resource group. 16 | required: False 17 | 18 | actions: 19 | - cib: | 20 | primitive gfs-controld ocf:pacemaker:controld 21 | 22 | clone c-gfs gfs-controld 23 | meta interleave=true ordered=true 24 | 25 | - crm: configure modgroup {{clvm-group}} add c-gfs 26 | shortdesc: Add gfs controld to cLVM group 27 | when: clvm-group 28 | -------------------------------------------------------------------------------- /scripts/gfs2/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2009 Andrew Beekhof 2 | # Copyright (C) 2015 Kristoffer Gronlund 3 | # 4 | # License: GNU General Public License (GPL) 5 | version: 2.2 6 | shortdesc: GFS2 File System (Cloned) 7 | longdesc: >- 8 | This template generates a cloned instance of the GFS2 file system. 9 | The file system should be on the device, unless cLVM is used. 10 | 11 | category: File System 12 | parameters: 13 | - name: id 14 | shortdesc: File System Resource ID 15 | longdesc: "NB: The clone is going to be named c- (e.g. c-bigfs)" 16 | example: bigfs 17 | required: true 18 | type: resource 19 | - name: directory 20 | shortdesc: Mount Point 21 | example: /mnt/bigfs 22 | required: true 23 | type: string 24 | - name: device 25 | shortdesc: Device 26 | required: true 27 | type: string 28 | - name: options 29 | shortdesc: Mount Options 30 | type: string 31 | required: false 32 | - name: dlm 33 | shortdesc: Create DLM Resource and Cloned Group 34 | longdesc: If set, create the DLM resource and cloned resource group. 
35 | type: boolean 36 | default: true 37 | - name: group 38 | shortdesc: Cloned Group Resource ID 39 | longdesc: ID of cloned group 40 | required: false 41 | type: resource 42 | default: g-dlm 43 | actions: 44 | - when: dlm 45 | cib: | 46 | primitive dlm ocf:pacemaker:controld 47 | op start timeout=90 48 | op stop timeout=60 49 | group {{group}} dlm 50 | clone c-dlm {{group}} meta interleave=true 51 | - cib: | 52 | primitive {{id}} ocf:heartbeat:Filesystem 53 | directory="{{directory}}" 54 | fstype="gfs2" 55 | device="{{device}}" 56 | {{#options}}options="{{options}}"{{/options}} 57 | op start timeout=60s 58 | op stop timeout=60s 59 | op monitor interval=20s timeout=40s 60 | 61 | - crm: configure modgroup {{group}} add {{id}} 62 | shortdesc: Add the GFS2 File System to the Cloned Group 63 | -------------------------------------------------------------------------------- /scripts/haproxy/haproxy.cfg: -------------------------------------------------------------------------------- 1 | global 2 | maxconn 256 3 | daemon 4 | 5 | defaults 6 | mode http 7 | timeout connect 5000ms 8 | timeout client 50000ms 9 | timeout server 50000ms 10 | 11 | listen http-in 12 | bind 0.0.0.0:80 13 | stats enable 14 | -------------------------------------------------------------------------------- /scripts/haproxy/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Server 3 | shortdesc: HAProxy 4 | longdesc: | 5 | HAProxy is a free, very fast and reliable solution offering 6 | high availability, load balancing, and proxying for TCP and 7 | HTTP-based applications. It is particularly suited for very 8 | high traffic web sites and powers quite a number of the 9 | world's most visited ones. 10 | 11 | NOTE: Installs a basic haproxy.cfg configuration file. 12 | This will overwrite any existing haproxy.cfg. 
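# Example invocation (same assumption as the other wizards here: crmsh's
# generic script runner); enabling "install" triggers the install and copy
# actions defined below:
#
#   crm script run haproxy install=true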
13 | 14 | include: 15 | - agent: systemd:haproxy 16 | name: haproxy 17 | ops: | 18 | op monitor interval=10s 19 | 20 | parameters: 21 | - name: install 22 | type: boolean 23 | value: false 24 | shortdesc: Install and configure HAProxy packages 25 | 26 | actions: 27 | - install: haproxy 28 | nodes: all 29 | when: install 30 | - service: "haproxy:disable" 31 | nodes: all 32 | when: install 33 | - copy: haproxy.cfg 34 | to: /etc/haproxy/haproxy.cfg 35 | nodes: all 36 | when: install 37 | - include: haproxy 38 | -------------------------------------------------------------------------------- /scripts/health/hahealth.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import glob 3 | import os 4 | import crm_script as crm 5 | 6 | 7 | if not os.path.isfile('/usr/sbin/crm') and not os.path.isfile('/usr/bin/crm'): 8 | # crm not installed 9 | crm.exit_ok({'status': 'crm not installed'}) 10 | 11 | 12 | def get_from_date(): 13 | rc, out, err = crm.call("date '+%F %H:%M' --date='1 day ago'", shell=True) 14 | return out.strip() 15 | 16 | 17 | def create_report(): 18 | cmd = ['crm', 'report', 19 | '-f', get_from_date(), 20 | '-Z', 'health-report'] 21 | rc, out, err = crm.call(cmd, shell=False) 22 | return rc == 0 23 | 24 | 25 | if not create_report(): 26 | crm.exit_fail('Failed to create report') 27 | 28 | 29 | def extract_report(): 30 | path = None 31 | compressed_tars = glob.glob('health-report.tar.*') 32 | if compressed_tars: 33 | path = compressed_tars[0] 34 | elif os.access('health-report.tar', os.F_OK | os.R_OK): 35 | path = 'health-report.tar' 36 | else: 37 | crm.exit_fail('Failed to extract report: file not found.') 38 | rc, out, err = crm.call(['tar', '-xf', path], shell=False) 39 | return rc == 0 40 | 41 | 42 | if not extract_report(): 43 | crm.exit_fail('Failed to extract report') 44 | 45 | analysis = '' 46 | if os.path.isfile('health-report/analysis.txt'): 47 | analysis = open('health-report/analysis.txt').read() 48 | 49 | crm.exit_ok({'status': 'OK', 'analysis': analysis}) 50 | -------------------------------------------------------------------------------- /scripts/health/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Basic 3 | shortdesc: Verify health and configuration 4 | longdesc: | 5 | Checks and detects issues with the cluster by creating and 6 | analysing a cluster report. 7 | 8 | Requires SSH access between cluster nodes. This command is 9 | also available from the command line as "crm cluster health". 10 | actions: 11 | - collect: collect.py 12 | shortdesc: Collect information 13 | - apply_local: hahealth.py 14 | shortdesc: Run cluster health check 15 | - report: report.py 16 | shortdesc: Report cluster state 17 | -------------------------------------------------------------------------------- /scripts/lvm-drbd/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016 Kristoffer Gronlund 2 | # 3 | # License: GNU General Public License (GPL) 4 | version: 2.2 5 | category: File System 6 | shortdesc: LVM Group on DRBD 7 | longdesc: | 8 | Configure an LVM resource group on top of DRBD. 9 | 10 | A DRBD primitive and a multi-state resource are used to replicate 11 | data between the nodes. 12 | 13 | LVM and file system resources are used to make the file systems 14 | available on the Primary node.
15 | 16 | For more details on what needs to be prepared to use 17 | this wizard, see the Highly Available NFS Storage with 18 | DRBD and Pacemaker section of the SUSE Linux Enterprise 19 | High Availability Extension 12 SP1 documentation. 20 | 21 | parameters: 22 | - name: group_id 23 | type: resource 24 | required: true 25 | shortdesc: Group Resource ID 26 | value: g-lvm 27 | 28 | include: 29 | - name: drbd 30 | script: drbd 31 | required: true 32 | parameters: 33 | - name: drbd_resource 34 | value: vg1 35 | 36 | - name: lvm 37 | script: lvm 38 | required: true 39 | parameters: 40 | - name: volgrpname 41 | value: vg1 42 | 43 | - name: example_fs 44 | shortdesc: Example File System Resource 45 | script: filesystem 46 | required: false 47 | parameters: 48 | - name: device 49 | value: /dev/example 50 | - name: directory 51 | value: /srv/example 52 | - name: fstype 53 | value: xfs 54 | 55 | actions: 56 | - include: drbd 57 | - include: lvm 58 | - shortdesc: Configure LVM and File System Group and Constraints 59 | cib: | 60 | group {{group_id}} {{lvm:id}} {{#example_fs:id}}{{example_fs:id}}{{/example_fs:id}} 61 | order o-drbd_before_{{group_id}} Mandatory: ms-{{drbd:id}}:promote {{group_id}}:start 62 | colocation c-{{group_id}}_on_drbd inf: {{group_id}} ms-{{drbd:id}}:Master 63 | -------------------------------------------------------------------------------- /scripts/lvm/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Script 3 | longdesc: >- 4 | Configure a resource for managing an LVM volume group. 5 | 6 | Does not create the referenced volume group. 7 | 8 | include: 9 | - agent: ocf:heartbeat:LVM-activate 10 | name: lvm 11 | parameters: 12 | - name: id 13 | required: true 14 | value: lvm 15 | type: resource 16 | - name: volgrpname 17 | required: true 18 | type: string 19 | ops: | 20 | op monitor interval=130s timeout=130s 21 | op stop timeout=130s on-fail=fence 22 | -------------------------------------------------------------------------------- /scripts/mailto/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | shortdesc: E-Mail 3 | longdesc: | 4 | Notifies recipient by e-mail in the event of a resource takeover. 5 | category: Basic 6 | include: 7 | - agent: ocf:heartbeat:MailTo 8 | name: mailto 9 | parameters: 10 | - name: id 11 | type: resource 12 | required: true 13 | - name: email 14 | type: email 15 | required: true 16 | - name: subject 17 | type: string 18 | required: false 19 | ops: | 20 | op start timeout="10" 21 | op stop timeout="10" 22 | op monitor interval="10" timeout="10" 23 | actions: 24 | - install: 25 | - mailx 26 | shortdesc: Ensure mail package is installed 27 | - include: mailto 28 | - cib: | 29 | clone c-{{id}} {{id}} 30 | -------------------------------------------------------------------------------- /scripts/nginx/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2017 Xin Liang 2 | # 3 | # License: GNU General Public License (GPL) 4 | version: 2.2 5 | category: Server 6 | shortdesc: Nginx Webserver 7 | longdesc: | 8 | Configure a resource group containing a virtual IP address and 9 | an instance of the Nginx web server. 10 | 11 | You can optionally configure a file system resource which will be 12 | mounted before the web server is started. 
13 | 14 | You can also optionally configure a database resource which will 15 | be started before the web server but after mounting the optional 16 | file system. 17 | include: 18 | - agent: ocf:heartbeat:nginx 19 | name: nginx 20 | longdesc: | 21 | The Nginx configuration file specified here must be available at the 22 | same path on all cluster nodes. nginx.service should be disabled on 23 | all cluster nodes, and the "server_name" option in the Nginx 24 | configuration file should match the virtual IP address. 25 | ops: | 26 | op start timeout="40" 27 | op stop timeout="60" 28 | op monitor interval="10" timeout="30" 29 | - script: virtual-ip 30 | shortdesc: The IP address configured here will start before the Nginx instance. 31 | parameters: 32 | - name: id 33 | value: "{{id}}-vip" 34 | - script: filesystem 35 | shortdesc: Optional file system mounted before the web server is started. 36 | required: false 37 | - script: database 38 | shortdesc: Optional database started before the web server is started. 39 | required: false 40 | parameters: 41 | - name: install 42 | type: boolean 43 | shortdesc: Install and configure nginx 44 | value: false 45 | actions: 46 | - install: 47 | - nginx 48 | shortdesc: Install the nginx package 49 | when: install 50 | - service: 51 | - nginx: disable 52 | shortdesc: Let cluster manage nginx 53 | when: install 54 | - include: filesystem 55 | - include: database 56 | - include: virtual-ip 57 | - include: nginx 58 | - cib: | 59 | group g-{{id}} 60 | {{filesystem:id}} 61 | {{database:id}} 62 | {{virtual-ip:id}} 63 | {{id}} 64 | -------------------------------------------------------------------------------- /scripts/oracle/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Database 3 | shortdesc: Oracle Database 4 | longdesc: Configure an Oracle Database cluster resource. 5 | parameters: 6 | - name: id 7 | required: true 8 | shortdesc: Resource ID 9 | longdesc: Unique ID for the database cluster resource. 10 | type: resource 11 | value: oracle 12 | - name: sid 13 | required: true 14 | shortdesc: Database SID 15 | type: string 16 | value: OracleDB 17 | - name: listener 18 | shortdesc: Listener. 19 | required: true 20 | type: string 21 | value: LISTENER 22 | - name: home 23 | required: true 24 | shortdesc: Database Home. 25 | type: string 26 | value: /srv/oracledb 27 | - name: user 28 | required: true 29 | shortdesc: Database User. 30 | type: string 31 | default: oracle 32 | actions: 33 | - cib: | 34 | primitive lsn-{{id}} ocf:heartbeat:oralsnr 35 | params 36 | sid="{{sid}}" 37 | home="{{home}}" 38 | user="{{user}}" 39 | listener="{{listener}}" 40 | op monitor interval="30" timeout="60" depth="0" 41 | 42 | primitive {{id}} ocf:heartbeat:oracle 43 | params 44 | sid="{{sid}}" 45 | home="{{home}}" 46 | user="{{user}}" 47 | op monitor interval="120s" 48 | 49 | colocation lsn-with-{{id}} inf: {{id}} lsn-{{id}} 50 | order lsn-before-{{id}} Mandatory: lsn-{{id}} {{id}} 51 | -------------------------------------------------------------------------------- /scripts/raid-lvm/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: File System 3 | shortdesc: RAID Hosting LVM 4 | longdesc: "Configure a RAID 1 host-based mirror together with a cluster-managed LVM volume group and LVM volumes."
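# Editor's note — illustration, not part of the original script: the
# {{raid1:id}} and {{lvm:id}} references in the cib action below resolve to
# the `id` parameters of the included raid1 and lvm scripts, so with all
# defaults left in place the action renders roughly to:
#
#   group g-raid raid1 lvm meta target-role=Stopped
#
# (a sketch assuming the included scripts keep their default ids; the group
# is created stopped so it can be reviewed before being started)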
5 | parameters: 6 | - name: id 7 | shortdesc: RAID and LVM Group ID 8 | longdesc: File system resources that mount the LVM volumes can be added to this group resource. 9 | type: resource 10 | value: g-raid 11 | required: true 12 | include: 13 | - script: raid1 14 | parameters: 15 | - name: raidconf 16 | value: /etc/mdadm.conf 17 | type: string 18 | - name: raiddev 19 | value: /dev/md0 20 | type: string 21 | - script: lvm 22 | actions: 23 | - include: lvm 24 | - include: raid1 25 | - cib: group {{id}} {{raid1:id}} {{lvm:id}} meta target-role=Stopped 26 | -------------------------------------------------------------------------------- /scripts/raid1/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Script 3 | include: 4 | - agent: ocf:heartbeat:Raid1 5 | name: raid1 6 | parameters: 7 | - name: id 8 | required: true 9 | value: raid1 10 | - name: raidconf 11 | required: true 12 | type: string 13 | - name: raiddev 14 | required: true 15 | type: string 16 | ops: | 17 | op monitor interval=60s timeout=130s on-fail=fence 18 | -------------------------------------------------------------------------------- /scripts/sap-as/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: SAP 3 | shortdesc: SAP ASCS Instance 4 | longdesc: | 5 | Configure a SAP ASCS instance including: 6 | 7 | 1) Virtual IP address for the SAP ASCS instance, 8 | 9 | 2) A file system on shared storage (/usr/sap/SID/ASCS##), 10 | 11 | 3) SAPInstance for ASCS. 12 | 13 | parameters: 14 | - name: id 15 | shortdesc: SAP ASCS Resource Group ID 16 | longdesc: Unique ID for the SAP ASCS instance resource group in the cluster. 17 | required: true 18 | type: resource 19 | value: grp_sap_NA0_sapna0as 20 | 21 | include: 22 | - script: sapinstance 23 | required: true 24 | parameters: 25 | - name: id 26 | value: rsc_sapinst_NA0_ASCS00_sapna0as 27 | - name: InstanceName 28 | value: NA0_ASCS00_sapna0as 29 | - name: START_PROFILE 30 | value: "/usr/sap/NA0/SYS/profile/START_ASCS00_sapna0as" 31 | - script: virtual-ip 32 | shortdesc: The Virtual IP address configured here will be for the SAP ASCS instance. 33 | required: true 34 | parameters: 35 | - name: id 36 | value: rsc_ip_NA0_sapna0as 37 | - name: ip 38 | value: 172.17.2.53 39 | - name: cidr_netmask 40 | value: 24 41 | - name: nic 42 | value: eth0 43 | - script: filesystem 44 | shortdesc: "File system resource for the /usr/sap/SID/ASCS## directory" 45 | longdesc: >- 46 | If a file system does not already exist on the block device 47 | specified here, you will need to run mkfs to create it, prior 48 | to starting the file system resource. You will also need 49 | to create the mount point directory on all cluster nodes.
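# Editor's note — illustration, not part of the original script: preparing
# the file system described above might look like the following; the device
# path and fs type are assumptions (the default mount options suggest
# ext3/ext4), only the directory comes from the script defaults:
#
#   mkfs.ext4 /dev/disk/by-id/example-ascs-disk   # once, from a single node
#   mkdir -p /usr/sap/NA0/ASCS00                  # on every cluster node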
50 | parameters: 51 | - name: id 52 | value: rsc_fs_NA0_sapna0as 53 | - name: directory 54 | value: "/usr/sap/NA0/ASCS00" 55 | - name: options 56 | value: "noatime,barrier=0,data=writeback" 57 | ops: | 58 | op stop timeout=300 59 | op monitor interval=30 timeout=130 60 | 61 | actions: 62 | - include: sapinstance 63 | - include: virtual-ip 64 | - include: filesystem 65 | - cib: 66 | group {{id}} 67 | {{virtual-ip:id}} 68 | {{filesystem:id}} 69 | {{sapinstance:id}} 70 | meta target-role=Stopped 71 | -------------------------------------------------------------------------------- /scripts/sap-ci/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: SAP 3 | shortdesc: SAP Central Instance 4 | longdesc: | 5 | Configure a SAP Central Instance including: 6 | 7 | 1) Virtual IP address for the SAP Central instance, 8 | 9 | 2) A file system on shared storage (/usr/sap/SID/DVEBMGS##), 10 | 11 | 3) SAPInstance for the Central Instance. 12 | 13 | parameters: 14 | - name: id 15 | shortdesc: SAP Central Resource Group ID 16 | longdesc: Unique ID for the SAP Central instance resource group in the cluster. 17 | required: true 18 | type: resource 19 | value: grp_sap_NA0_sapna0ci 20 | 21 | include: 22 | - script: sapinstance 23 | required: true 24 | parameters: 25 | - name: id 26 | value: rsc_sapinst_NA0_DVEBMGS01_sapna0ci 27 | - name: InstanceName 28 | value: NA0_DVEBMGS01_sapna0ci 29 | - name: START_PROFILE 30 | value: "/usr/sap/NA0/SYS/profile/START_DVEBMGS01_sapna0ci" 31 | - script: virtual-ip 32 | shortdesc: The Virtual IP address configured here will be for the SAP Central instance. 33 | required: true 34 | parameters: 35 | - name: id 36 | value: rsc_ip_NA0_sapna0ci 37 | - name: ip 38 | value: 172.17.2.55 39 | - name: cidr_netmask 40 | value: 24 41 | - name: nic 42 | value: eth0 43 | - script: filesystem 44 | shortdesc: "File system resource for the /usr/sap/SID/DVEBMGS## directory." 45 | longdesc: >- 46 | If a file system does not already exist on the block device 47 | specified here, you will need to run mkfs to create it, prior 48 | to starting the file system resource. You will also need 49 | to create the mount point directory on all cluster nodes. 50 | parameters: 51 | - name: id 52 | value: rsc_fs_NA0_sapna0ci 53 | - name: directory 54 | value: "/usr/sap/NA0/DVEBMGS01" 55 | - name: options 56 | value: "noatime,barrier=0,data=writeback" 57 | ops: | 58 | op stop timeout=300 59 | op monitor interval=30 timeout=130 60 | 61 | actions: 62 | - include: sapinstance 63 | - include: virtual-ip 64 | - include: filesystem 65 | - cib: 66 | group {{id}} 67 | {{virtual-ip:id}} 68 | {{filesystem:id}} 69 | {{sapinstance:id}} 70 | meta target-role=Stopped 71 | -------------------------------------------------------------------------------- /scripts/sap-db/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: SAP 3 | shortdesc: SAP Database Instance 4 | longdesc: | 5 | Configure a SAP database instance including: 6 | 7 | 1) A virtual IP address for the SAP database instance, 8 | 9 | 2) A file system on shared storage (/sapdb), 10 | 11 | 3) SAPinstance for the database. 12 | 13 | parameters: 14 | - name: id 15 | shortdesc: SAP Database Resource Group ID 16 | longdesc: Unique ID for the SAP Database instance resource group in the cluster. 
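# Editor's note — illustration, not part of the original script: like the
# ASCS and CI wizards above, this one ends by grouping the IP, file system
# and database resources with target-role=Stopped, so nothing starts until
# the administrator has reviewed the result, e.g.:
#
#   crm configure show grp_sapdb_NA0
#   crm resource start grp_sapdb_NA0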
17 | required: true 18 | type: resource 19 | value: grp_sapdb_NA0 20 | 21 | include: 22 | - script: sapdb 23 | required: true 24 | - script: virtual-ip 25 | shortdesc: The Virtual IP address configured here will be for the SAP Database instance. 26 | required: true 27 | parameters: 28 | - name: id 29 | value: rsc_ip_NA0_sapna0db 30 | - name: ip 31 | value: 172.17.2.54 32 | - name: cidr_netmask 33 | value: 24 34 | - name: nic 35 | value: eth0 36 | - script: filesystem 37 | shortdesc: "File system resource for the SAP database (typically /sapdb)." 38 | longdesc: >- 39 | If a file system does not already exist on the block device 40 | specified here, you will need to run mkfs to create it, prior 41 | to starting the file system resource. You will also need 42 | to create the mount point directory on all cluster nodes. 43 | parameters: 44 | - name: id 45 | value: rsc_fs_NA0_sapna0db 46 | - name: directory 47 | value: "/sapdb" 48 | - name: options 49 | value: "noatime,barrier=0,data=writeback" 50 | ops: | 51 | op stop timeout=300 52 | op monitor interval=30 timeout=130 53 | 54 | actions: 55 | - include: sapdb 56 | - include: virtual-ip 57 | - include: filesystem 58 | - cib: 59 | group {{id}} 60 | {{virtual-ip:id}} 61 | {{filesystem:id}} 62 | {{sapdb:id}} 63 | meta target-role=Stopped 64 | -------------------------------------------------------------------------------- /scripts/sapdb/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Script 3 | shortdesc: SAP Database Instance 4 | longdesc: Create a single SAP Database Instance. 5 | 6 | parameters: 7 | - name: id 8 | required: true 9 | shortdesc: Resource ID 10 | longdesc: Unique ID for this SAP instance resource in the cluster. 11 | type: resource 12 | value: rsc_sapdb_NA0 13 | - name: SID 14 | required: true 15 | shortdesc: Database SID 16 | longdesc: The SID for the database. 17 | type: string 18 | value: NA0 19 | - name: DBTYPE 20 | required: true 21 | shortdesc: Database Type 22 | longdesc: The type of database. 23 | value: ADA 24 | type: string 25 | 26 | actions: 27 | - cib: | 28 | primitive {{id}} ocf:heartbeat:SAPDatabase 29 | params SID="{{SID}}" DBTYPE="{{DBTYPE}}" 30 | op monitor interval="120" timeout="60" start-delay="180" 31 | op start timeout="1800" 32 | op stop timeout="1800" 33 | -------------------------------------------------------------------------------- /scripts/sapinstance/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Script 3 | shortdesc: SAP Instance 4 | longdesc: Create a single SAP Instance. 5 | 6 | parameters: 7 | - name: id 8 | required: true 9 | shortdesc: Resource ID 10 | longdesc: Unique ID for this SAP instance resource in the cluster. 11 | type: resource 12 | value: sapinstance 13 | - name: InstanceName 14 | required: true 15 | shortdesc: Instance Name 16 | longdesc: The name of the SAP instance. 17 | type: string 18 | value: sapinstance 19 | - name: START_PROFILE 20 | required: true 21 | shortdesc: Start Profile 22 | longdesc: This defines the path and the file name of the SAP start profile of this particular instance. 23 | type: string 24 | - name: AUTOMATIC_RECOVER 25 | required: true 26 | shortdesc: Automatic Recover 27 | longdesc: >- 28 | The SAPInstance resource agent tries to recover a failed start 29 | attempt automatically one time. This is done by killing running 30 | instance processes, removing the kill.sap file and executing 31 | cleanipc.
Sometimes a crashed SAP instance leaves some 32 | processes and/or shared memory segments behind. Setting this 33 | option to true will try to remove those leftovers during a 34 | start operation. This reduces manual work for the 35 | administrator. 36 | type: boolean 37 | value: true 38 | 39 | actions: 40 | - cib: | 41 | primitive {{id}} ocf:heartbeat:SAPInstance 42 | params 43 | InstanceName="{{InstanceName}}" 44 | AUTOMATIC_RECOVER="{{AUTOMATIC_RECOVER}}" 45 | START_PROFILE="{{START_PROFILE}}" 46 | op monitor interval="180" timeout="60" start-delay="240" 47 | op start timeout="240" 48 | op stop timeout="240" on-fail="block" 49 | -------------------------------------------------------------------------------- /scripts/sbd-device/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016 Kristoffer Gronlund 2 | # 3 | # License: GNU General Public License (GPL) 4 | version: 2.2 5 | category: Script 6 | shortdesc: "Create SBD Device" 7 | longdesc: | 8 | Optional step to initialize and configure the SBD Device. 9 | 10 | Prerequisites: 11 | 12 | * The environment must have shared storage reachable by all nodes. 13 | 14 | parameters: 15 | - name: device 16 | shortdesc: Shared Storage Device 17 | example: /dev/disk/by-id/... 18 | required: true 19 | type: string 20 | 21 | - name: watchdog 22 | shortdesc: Watchdog Device 23 | value: /dev/watchdog 24 | type: string 25 | 26 | actions: 27 | - shortdesc: Verify configuration 28 | sudo: true 29 | call: | 30 | #!/bin/sh 31 | set -e 32 | systemctl is-active --quiet sbd && { echo "ERROR: SBD daemon is already running"; exit 1; } || true 33 | test -b "{{device}}" || { echo "ERROR: Not a device: {{device}}"; exit 1; } 34 | lsmod | egrep "(wd|dog)" || { echo "ERROR: No watchdog kernel module loaded"; exit 1; } 35 | test -c "{{watchdog}}" || { echo "ERROR: Not a device: {{watchdog}}"; exit 1; } 36 | 37 | - shortdesc: Initialize the SBD device 38 | sudo: true 39 | nodes: local 40 | call: | 41 | #!/bin/sh 42 | sbd -d "{{device}}" dump &> /dev/null || sbd -d "{{device}}" create 43 | # sbd allocate "$(uname -n)" # FIXME 44 | 45 | - shortdesc: Verify SBD Device 46 | call: | 47 | #!/bin/sh 48 | sbd -d "{{device}}" list 49 | 50 | - shortdesc: Configure SBD Daemon 51 | sudo: true 52 | call: | 53 | #!/bin/sh 54 | [ -f "/etc/sysconfig/sbd" ] && rm -f /etc/sysconfig/sbd || true 55 | cat <<EOF > /etc/sysconfig/sbd 56 | SBD_DEVICE="{{device}}" 57 | SBD_WATCHDOG_DEV="{{watchdog}}" 58 | EOF 59 | 60 | 61 | - shortdesc: Enable SBD Daemon 62 | service: 63 | - sbd: start 64 | -------------------------------------------------------------------------------- /scripts/sbd/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2009 Dejan Muhamedagic 2 | # Copyright (C) 2015 Kristoffer Gronlund 3 | # 4 | # License: GNU General Public License (GPL) 5 | version: 2.2 6 | category: Stonith 7 | shortdesc: "SBD, Shared storage based fencing" 8 | longdesc: | 9 | Create an SBD STONITH resource. SBD must be configured to use 10 | a particular shared storage device using /etc/sysconfig/sbd. 11 | 12 | This wizard can optionally create and configure an SBD device. 13 | A shared device must be available and visible on all nodes. 14 | 15 | For more information, see http://www.linux-ha.org/wiki/SBD_Fencing 16 | or the sbd(8) manual page.
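# Editor's note — illustration, not part of the original script: with the
# default resource id, the cib action further down renders roughly to
#
#   primitive sbd-fencing stonith:fence_sbd pcmk_delay_max=30s
#   property stonith-enabled=true
#
# pcmk_delay_max staggers the fencing action by a random delay, which helps
# avoid a fence race in two-node clusters.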
17 | 18 | parameters: 19 | - name: id 20 | shortdesc: Resource ID (Name) 21 | value: sbd-fencing 22 | example: sbd-fencing 23 | required: true 24 | type: resource 25 | 26 | include: 27 | - script: sbd-device 28 | required: false 29 | 30 | actions: 31 | - include: sbd-device 32 | 33 | - cib: | 34 | primitive {{id}} stonith:fence_sbd 35 | pcmk_delay_max=30s 36 | 37 | property stonith-enabled=true 38 | -------------------------------------------------------------------------------- /scripts/virtual-ip/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | shortdesc: Virtual IP 3 | category: Basic 4 | include: 5 | - agent: ocf:heartbeat:IPaddr2 6 | name: virtual-ip 7 | parameters: 8 | - name: id 9 | type: resource 10 | required: true 11 | - name: ip 12 | type: ip_address 13 | required: true 14 | - name: cidr_netmask 15 | type: integer 16 | required: false 17 | - name: broadcast 18 | type: string 19 | required: false 20 | ops: | 21 | op start timeout="20" op stop timeout="20" 22 | op monitor interval="10" timeout="20" 23 | actions: 24 | - include: virtual-ip 25 | -------------------------------------------------------------------------------- /scripts/vmware/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016 Kristoffer Gronlund 2 | # 3 | # License: GNU General Public License (GPL) 4 | version: 2.2 5 | category: Stonith 6 | shortdesc: Fencing using vCenter / ESX Server 7 | longdesc: | 8 | Note that SBD is the recommended fencing mechanism for VMware 9 | hosts! Please refer to the documentation for more details on 10 | recommended fencing configurations. 11 | 12 | Fencing for VMware virtualized hosts using ESX Server or vCenter. 13 | 14 | This wizard configures a fencing resource for a single node. 15 | It is necessary to run the wizard for each node to fence. 16 | 17 | Prerequisites 18 | 19 | 1. Install the vSphere Web Services SDK on all nodes. 20 | 21 | 2. Generate vCenter credentials using credstore_admin.pl 22 | 23 | 3. Copy credentials to the same location on all nodes. 
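# Editor's note — illustration, not part of the original script: generating
# the credential store (step 2 above) typically looks something like the
# following; the exact script path and flags depend on the vSphere SDK
# version and are assumptions here:
#
#   credstore_admin.pl add -s vcenter.example.com -u admin -p secret
#
# The resulting file is what the "credstore" parameter below should point
# to, copied to the same location on all nodes.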
24 | 25 | parameters: 26 | - name: id 27 | type: resource 28 | shortdesc: Base Resource ID 29 | value: vcenter-fencing 30 | required: true 31 | - name: node_name 32 | type: string 33 | shortdesc: Name of node to fence 34 | required: true 35 | - name: machine_name 36 | type: string 37 | shortdesc: Name of machine in vCenter inventory 38 | required: true 39 | - name: server 40 | type: string 41 | shortdesc: vCenter server URL 42 | required: true 43 | example: vcenter.example.com 44 | - name: credstore 45 | type: string 46 | shortdesc: Credentials file name 47 | required: true 48 | 49 | actions: 50 | - cib: | 51 | primitive {{id}}-{{node_name}} stonith:external/vcenter 52 | VI_SERVER="{{server}}" 53 | VI_CREDSTORE="{{credstore}}" 54 | HOSTLIST="{{node_name}}={{machine_name}}" 55 | RESETPOWERON="0" 56 | pcmk_host_check="static-list" 57 | pcmk_host_list="{{node_name}}" 58 | op monitor interval="60s" 59 | location loc-{{id}}-{{node_name}} {{id}}-{{node_name}} -inf: {{node_name}} 60 | property stonith-enabled=true 61 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Note that this script only installs the python modules, 3 | # the other parts of crmsh are installed by autotools 4 | from setuptools import setup 5 | import contextlib 6 | import re 7 | 8 | VERSION = '0.0.1' 9 | 10 | with contextlib.suppress(Exception): 11 | with open('version', 'r', encoding='ascii') as f: 12 | match = re.match('^\\d+\\.\\d+\\.\\d+', f.read().strip()) 13 | if match: 14 | VERSION = match.group(0) 15 | 16 | setup(name='crmsh', 17 | version=VERSION, 18 | description='Command-line interface for High-Availability cluster management', 19 | author='Kristoffer Gronlund, Xin Liang', 20 | author_email='XLiang@suse.com', 21 | url='http://crmsh.github.io/', 22 | packages=['crmsh', 'crmsh.crash_test', 'crmsh.report', 'crmsh.prun'], 23 | install_requires=['lxml', 'PyYAML', 'python-dateutil', 'packaging'], 24 | scripts=['bin/crm'], 25 | data_files=[('/usr/share/crmsh', ['doc/crm.8.adoc'])], 26 | include_package_data=True) 27 | -------------------------------------------------------------------------------- /templates/apache: -------------------------------------------------------------------------------- 1 | %name apache 2 | 3 | # Copyright (C) 2009 Dejan Muhamedagic 4 | # 5 | # License: GNU General Public License (GPL) 6 | 7 | # Apache web server 8 | # 9 | # This template generates a single primitive resource of type apache 10 | 11 | %depends_on virtual-ip 12 | %suggests filesystem 13 | 14 | # NB: 15 | # The apache RA monitor operation requires the status module to 16 | # be loaded and access to its page (/server-status) allowed from 17 | # localhost (127.0.0.1). Typically, the status module is not 18 | # loaded by default. How to enable it depends on your 19 | # distribution. For instance, on recent openSUSE or SLES 20 | # releases, it is enough to add the word "status" to the list in 21 | # the variable APACHE_MODULES in /etc/sysconfig/apache2 and then 22 | # start and stop apache once using rcapache2.
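# Editor's note — illustration, not part of the original template: on the
# openSUSE/SLES releases mentioned above, enabling the status module could
# look like this (commands are assumptions for that platform):
#
#   a2enmod status        # or append "status" to APACHE_MODULES
#                         # in /etc/sysconfig/apache2
#   rcapache2 restart     # load the module once before clustering apache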
23 | 24 | %required 25 | 26 | # Name the apache resource 27 | # For example, to name the resource web-1, edit the line below 28 | # as follows: 29 | # %% id web-1 30 | %% id 31 | 32 | # The full pathname of the Apache configuration file 33 | # Example: 34 | # %% configfile /etc/apache2/httpd.conf 35 | %% configfile 36 | 37 | %optional 38 | 39 | # Extra options to apply when starting apache. See man httpd(8). 40 | 41 | %% options 42 | 43 | # Files (one or more) which contain extra environment variables, 44 | # such as /etc/apache2/envvars 45 | 46 | %% envfiles 47 | 48 | %generate 49 | 50 | primitive %apache ocf:heartbeat:apache 51 | params configfile=%_:configfile 52 | opt options=%_:options 53 | opt envfiles=%_:envfiles 54 | 55 | monitor %apache 120s:60s 56 | 57 | group %_:id 58 | %if %filesystem 59 | %filesystem 60 | %fi 61 | %apache %virtual-ip 62 | -------------------------------------------------------------------------------- /templates/filesystem: -------------------------------------------------------------------------------- 1 | %name filesystem 2 | 3 | # Copyright (C) 2009 Dejan Muhamedagic 4 | # 5 | # License: GNU General Public License (GPL) 6 | 7 | # Filesystem 8 | # 9 | # This template generates a single primitive resource of type 10 | # Filesystem 11 | 12 | %required 13 | 14 | # The name of the block device for the filesystem, or -U, -L 15 | # options for mount, or an NFS mount specification. 16 | # Example: 17 | # %% device /dev/hda 18 | %% device 19 | 20 | # The mount point for the filesystem. 21 | # Example: 22 | # %% directory /mnt/fs 23 | %% directory 24 | 25 | # The type of filesystem to be mounted. 26 | # Example: 27 | # %% fstype xfs 28 | %% fstype 29 | 30 | %optional 31 | 32 | # Any extra options to be given as -o options to mount. 33 | # 34 | # For bind mounts, add "bind" here and set fstype to "none". 35 | # We will do the right thing for options such as "bind,ro". 36 | %% options 37 | 38 | %generate 39 | 40 | primitive %_ ocf:heartbeat:Filesystem 41 | params 42 | device=%_:device 43 | directory=%_:directory 44 | fstype=%_:fstype 45 | -------------------------------------------------------------------------------- /templates/gfs2: -------------------------------------------------------------------------------- 1 | %name gfs2 2 | 3 | # Copyright (C) 2009 Andrew Beekhof 4 | # 5 | # License: GNU General Public License (GPL) 6 | 7 | # gfs2 filesystem (cloned) 8 | # 9 | # This template generates a cloned instance of the gfs2 filesystem 10 | # 11 | # The filesystem should be on the device, unless clvm is used 12 | # To use clvm, pull it along with this template: 13 | # new myfs gfs2 clvm 14 | # 15 | # NB: You need just one dlm and gfs-controld, regardless of how many 16 | # filesystems. In other words, you can use this template only for 17 | # one filesystem and to make another one, you'll have to edit the 18 | # resulting configuration yourself. 19 | 20 | %depends_on gfs2-base 21 | %suggests clvm 22 | 23 | %required 24 | 25 | # Name the gfs2 filesystem 26 | # (for example: bigfs) 27 | # NB: The clone is going to be named c- (e.g.
c-bigfs) 28 | # Example: 29 | # %% id bigfs 30 | %% id 31 | 32 | # The mount point 33 | # Example: 34 | # %% directory /mnt/bigfs 35 | %% directory 36 | 37 | # The device 38 | 39 | %% device 40 | 41 | # optional parameters for the gfs2 filesystem 42 | 43 | %optional 44 | 45 | # mount options 46 | 47 | %% options 48 | 49 | %generate 50 | 51 | primitive %_:id ocf:heartbeat:Filesystem 52 | params 53 | directory="%_:directory" 54 | fstype="gfs2" 55 | device="%_:device" 56 | opt options="%_:options" 57 | 58 | monitor %_:id 20:40 59 | 60 | clone c-%_:id %_:id 61 | meta interleave="true" ordered="true" 62 | 63 | colocation colo-%_:id-gfs inf: c-%_:id gfs-clone 64 | 65 | order order-%_:id-gfs inf: gfs-clone c-%_:id 66 | 67 | # if there's clvm, generate some constraints too 68 | # 69 | 70 | %if %clvm 71 | colocation colo-%_:id-%clvm:id inf: c-%_:id c-%clvm:id 72 | 73 | order order-%_:id-%clvm:id inf: c-%clvm:id c-%_:id 74 | %fi 75 | -------------------------------------------------------------------------------- /templates/gfs2-base: -------------------------------------------------------------------------------- 1 | %name gfs2-base 2 | 3 | # Copyright (C) 2009 Andrew Beekhof 4 | # 5 | # License: GNU General Public License (GPL) 6 | 7 | # gfs2 filesystem base (cloned) 8 | # 9 | # This template generates the cloned dlm and gfs-controld base 10 | # infrastructure required by gfs2 filesystems 11 | # 12 | # To use clvm, pull it along with this template: 13 | # new myfs gfs2 clvm 14 | # 15 | # NB: You need just one dlm and gfs-controld, regardless of how many 16 | # filesystems. In other words, you can use this template only for 17 | # one filesystem and to make another one, you'll have to edit the 18 | # resulting configuration yourself. 19 | 20 | %suggests clvm 21 | %required 22 | 23 | %generate 24 | 25 | primitive dlm ocf:pacemaker:controld 26 | 27 | clone dlm-clone dlm 28 | meta interleave="true" ordered="true" 29 | 30 | primitive gfs-controld ocf:pacemaker:controld 31 | 32 | clone gfs-clone gfs-controld 33 | meta interleave="true" ordered="true" 34 | 35 | colocation colo-gfs-dlm inf: gfs-clone dlm-clone 36 | 37 | order order-gfs-dlm inf: dlm-clone gfs-clone 38 | 39 | # if there's clvm, generate some constraints too 40 | # 41 | 42 | %if %clvm 43 | colocation colo-clvm-dlm inf: clvm-clone dlm-clone 44 | 45 | order order-clvm-dlm inf: dlm-clone clvm-clone 46 | %fi 47 | -------------------------------------------------------------------------------- /templates/sbd: -------------------------------------------------------------------------------- 1 | %name sbd 2 | 3 | # Copyright (C) 2009 Dejan Muhamedagic 4 | # 5 | # License: GNU General Public License (GPL) 6 | 7 | # Shared storage based fencing. 8 | # 9 | # This template generates a single instance of fence_sbd. 10 | # 11 | # There is quite a bit more to do to make this stonith operational. 12 | # See http://www.linux-ha.org/wiki/SBD_Fencing for information. 13 | # 14 | 15 | %required 16 | 17 | # The resource id (name).
18 | # Example: 19 | # %% id stonith-sbd 20 | %% id 21 | 22 | %generate 23 | 24 | primitive %_:id stonith:fence_sbd 25 | op monitor interval=15s timeout=60s 26 | op start timeout=60s 27 | -------------------------------------------------------------------------------- /templates/virtual-ip: -------------------------------------------------------------------------------- 1 | %name virtual-ip 2 | 3 | # Copyright (C) 2009 Dejan Muhamedagic 4 | # 5 | # License: GNU General Public License (GPL) 6 | 7 | # Virtual IP address 8 | # 9 | # This template generates a single primitive resource of type IPaddr2 10 | 11 | %required 12 | 13 | # Specify an IP address 14 | # (for example: 192.168.1.101) 15 | # Example: 16 | # %% ip 192.168.1.101 17 | 18 | %% ip 19 | 20 | %optional 21 | 22 | # If your network has a mask different from its class mask, then 23 | # specify it here either in CIDR format or as a dotted quad 24 | # (for example: 24 or 255.255.255.0) 25 | # Example: 26 | # %% netmask 24 27 | 28 | %% netmask 29 | 30 | # Need LVS support? Set this to true then. 31 | 32 | %% lvs_support 33 | 34 | %generate 35 | 36 | primitive %_ ocf:heartbeat:IPaddr2 37 | params ip=%_:ip 38 | opt cidr_netmask=%_:netmask 39 | opt lvs_support=%_:lvs_support 40 | -------------------------------------------------------------------------------- /test/bugs-test.txt: -------------------------------------------------------------------------------- 1 | property stonith-enabled=false 2 | node node1 3 | op_defaults timeout=60s 4 | group g1 gr1 gr2 5 | group g2 gr3 6 | group g3 gr4 7 | primitive gr1 Dummy 8 | primitive gr2 Dummy 9 | primitive gr3 Dummy 10 | primitive gr4 Dummy 11 | location loc1 g1 rule 200: #uname eq node1 12 | -------------------------------------------------------------------------------- /test/cib-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (C) 2009 Lars Marowsky-Bree 3 | # See COPYING for license information. 4 | 5 | BASE=${1:-`pwd`}/cibtests 6 | AUTOCREATE=1 7 | 8 | logt() { 9 | local msg="$1" 10 | echo $(date) "$msg" >>$LOGF 11 | echo "$msg" 12 | } 13 | 14 | difft() { 15 | crm_diff -V -u -o $1 -n $2 16 | } 17 | 18 | run() { 19 | local cmd="$1" 20 | local erc="$2" 21 | local msg="$3" 22 | local rc 23 | local out 24 | 25 | echo $(date) "$1" >>$LOGF 26 | CIB_file=$CIB_file $1 >>$LOGF 2>&1 ; rc=$? 27 | echo $(date) "Returned: $rc (expected $erc)" >>$LOGF 28 | if [ $erc != "I" ]; then 29 | if [ $rc -ne $erc ]; then 30 | logt "$msg: FAILED ($erc != $rc)" 31 | cat $LOGF 32 | return 1 33 | fi 34 | fi 35 | echo "$msg: ok" 36 | return 0 37 | } 38 | 39 | runt() { 40 | local T="$1" 41 | local CIBE="$BASE/$(basename $T .input).exp.xml" 42 | cp $BASE/shadow.base $CIB_file 43 | run "crm" 0 "Running testcase: $T" <$T 44 | 45 | # strip attributes from CIB_file 46 | echo "" > $CIB_file.$$ 47 | tail -n +2 $CIB_file >> $CIB_file.$$ 48 | mv $CIB_file.$$ $CIB_file 49 | 50 | local rc 51 | if [ ! -e $CIBE ]; then 52 | if [ "$AUTOCREATE" = "1" ]; then 53 | logt "Creating new expected output for $T." 54 | cp $CIB_file $CIBE 55 | return 0 56 | else 57 | logt "$T: No expected output." 58 | return 0 59 | fi 60 | fi 61 | 62 | if !
crm_diff -u -o $CIBE -n $CIB_file >/dev/null 2>&1 ; then 63 | logt "$T: XML: $CIBE does not match $CIB_file" 64 | difft $CIBE $CIB_file 65 | return 1 66 | fi 67 | return 0 68 | } 69 | 70 | LOGF=$(mktemp) 71 | export PATH=/usr/sbin:$PATH 72 | 73 | export CIB_file=$BASE/shadow.test 74 | 75 | failed=0 76 | for T in $(ls $BASE/*.input) ; do 77 | runt $T 78 | failed=$(($? + $failed)) 79 | done 80 | 81 | if [ $failed -gt 0 ]; then 82 | logt "$failed tests failed!" 83 | echo "Log:" $LOGF "CIB:" $CIB_file 84 | exit 1 85 | fi 86 | 87 | logt "All tests passed!" 88 | #rm $LOGF $CIB_file 89 | exit 0 90 | 91 | -------------------------------------------------------------------------------- /test/cibtests/001.exp.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /test/cibtests/001.input: -------------------------------------------------------------------------------- 1 | configure 2 | property stonith-enabled=false 3 | primitive rsc_dummy ocf:heartbeat:Dummy 4 | monitor rsc_dummy 30 5 | commit 6 | quit 7 | -------------------------------------------------------------------------------- /test/cibtests/002.exp.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /test/cibtests/002.input: -------------------------------------------------------------------------------- 1 | configure 2 | property stonith-enabled=false 3 | primitive testfs ocf:heartbeat:Dummy \ 4 | params fake=1 5 | clone testfs-clone testfs \ 6 | meta ordered="true" interleave="true" 7 | commit 8 | quit 9 | -------------------------------------------------------------------------------- /test/cibtests/003.exp.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /test/cibtests/003.input: -------------------------------------------------------------------------------- 1 | configure 2 | property stonith-enabled=false 3 | primitive testfs ocf:heartbeat:Dummy \ 4 | params fake=2 5 | clone testfs-clone testfs \ 6 | meta ordered="true" interleave="true" 7 | commit 8 | up 9 | resource stop testfs-clone 10 | quit 11 | 12 | -------------------------------------------------------------------------------- /test/cibtests/004.exp.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /test/cibtests/004.input: -------------------------------------------------------------------------------- 1 | configure 2 | property stonith-enabled=false 3 | primitive testfs ocf:heartbeat:Dummy \ 4 | params fake=hello 5 | clone testfs-clone testfs \ 6 | meta ordered="true" interleave="true" 7 | commit 8 | up 9 | resource start testfs-clone 10 | quit 11 | 12 | -------------------------------------------------------------------------------- 
/test/cibtests/shadow.base: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /test/crm-interface: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2008-2011 Dejan Muhamedagic 2 | # See COPYING for license information. 3 | 4 | CIB=__crmsh_regtest 5 | 6 | filter_epoch() { 7 | sed '/^/p' | filter_date | filter_epoch 14 | } 15 | 16 | crm_setup() { 17 | $CRM_NO_REG options reset 18 | $CRM_NO_REG options check-frequency on-verify 19 | $CRM_NO_REG options check-mode relaxed 20 | $CRM_NO_REG cib delete $CIB >/dev/null 2>&1 21 | } 22 | 23 | crm_mksample() { 24 | $CRM_NO_REG cib new $CIB empty >/dev/null 2>&1 25 | $CRM_NO_REG -c $CIB< 2 | # See COPYING for license information. 3 | 4 | lead=".TRY" 5 | describe_show() { 6 | echo $lead $* 7 | } 8 | describe_showxml() { 9 | : echo $lead $* 10 | } 11 | describe_session() { 12 | echo $lead $* 13 | } 14 | describe_filesession() { 15 | echo $lead $* 16 | } 17 | describe_single() { 18 | echo $lead $* 19 | } 20 | -------------------------------------------------------------------------------- /test/features/constraints_bugs.feature: -------------------------------------------------------------------------------- 1 | @constraints 2 | Feature: Verify constraints(order/colocation/location) bug 3 | 4 | Tag @clean means need to stop cluster service if the service is available 5 | Need nodes: hanode1 hanode2 6 | 7 | Background: Setup a two nodes cluster 8 | Given Cluster service is "stopped" on "hanode1" 9 | And Cluster service is "stopped" on "hanode2" 10 | When Run "crm cluster init -y" on "hanode1" 11 | Then Cluster service is "started" on "hanode1" 12 | And Show cluster status on "hanode1" 13 | When Run "crm cluster join -c hanode1 -y" on "hanode2" 14 | Then Cluster service is "started" on "hanode2" 15 | And Online nodes are "hanode1 hanode2" 16 | And Show cluster status on "hanode1" 17 | 18 | @clean 19 | Scenario: Convert score to kind for rsc_order(bsc#1122391) 20 | When Run "crm configure primitive d1 Dummy op monitor interval=10s" on "hanode1" 21 | And Run "crm configure primitive d2 Dummy op monitor interval=10s" on "hanode1" 22 | And Run "crm configure order o1 100: d1 d2" on "hanode1" 23 | When Run "crm configure show" on "hanode1" 24 | Then Expected "order o1 Mandatory: d1 d2" in stdout 25 | -------------------------------------------------------------------------------- /test/features/coverage/coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | data_file = /.coverage 3 | parallel = True 4 | source_pkgs = crmsh 5 | -------------------------------------------------------------------------------- /test/features/coverage/sitecustomize.py: -------------------------------------------------------------------------------- 1 | import coverage 2 | import atexit 3 | cov=coverage.Coverage(config_file="/opt/crmsh/test/features/coverage/coveragerc") 4 | atexit.register(lambda:(cov.stop(),cov.save())) 5 | cov.start() 6 | -------------------------------------------------------------------------------- /test/features/environment.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | import subprocess 4 | import time 5 | 6 | import crmsh.userdir 7 | import crmsh.utils 8 | from crmsh.sh import ShellUtils 9 | 10 | 11 | def get_online_nodes(): 12 
| _, out, _ = ShellUtils().get_stdout_stderr('sudo crm_node -l') 13 | if out: 14 | return re.findall(r'[0-9]+ (.*) member', out) 15 | else: 16 | return None 17 | 18 | 19 | def resource_cleanup(): 20 | subprocess.run( 21 | ['sudo', 'crm', 'resource', 'cleanup'], 22 | stdin=subprocess.DEVNULL, 23 | stdout=subprocess.DEVNULL, 24 | stderr=subprocess.DEVNULL, 25 | ) 26 | 27 | 28 | def before_step(context, step): 29 | context.logger = logging.getLogger("Step:{}".format(step.name)) 30 | 31 | 32 | def before_tag(context, tag): 33 | # tag @clean means need to stop cluster service 34 | if tag == "clean": 35 | time.sleep(3) 36 | online_nodes = get_online_nodes() 37 | if online_nodes: 38 | resource_cleanup() 39 | while True: 40 | time.sleep(1) 41 | rc, stdout, _ = ShellUtils().get_stdout_stderr('sudo crmadmin -D -t 1') 42 | if rc == 0 and stdout.startswith('Designated'): 43 | break 44 | subprocess.call( 45 | ['sudo', 'crm', 'cluster', 'stop', '--all'], 46 | stdin=subprocess.DEVNULL, 47 | stdout=subprocess.DEVNULL, 48 | stderr=subprocess.DEVNULL, 49 | ) 50 | if tag == "skip_non_root": 51 | sudoer = crmsh.userdir.get_sudoer() 52 | if sudoer or crmsh.userdir.getuser() != 'root': 53 | context.scenario.skip() 54 | -------------------------------------------------------------------------------- /test/features/geo_setup.feature: -------------------------------------------------------------------------------- 1 | @geo 2 | Feature: geo cluster 3 | 4 | Test geo cluster setup using bootstrap 5 | Tag @clean means need to stop cluster service if the service is available 6 | Need nodes: hanode1 hanode2 hanode3 7 | 8 | @clean 9 | Scenario: GEO cluster setup 10 | Given Cluster service is "stopped" on "hanode1" 11 | And Cluster service is "stopped" on "hanode2" 12 | When Run "crm cluster init -y -n cluster1" on "hanode1" 13 | Then Cluster service is "started" on "hanode1" 14 | When Run "crm configure primitive vip IPaddr2 params ip=@vip.0" on "hanode1" 15 | 16 | When Run "crm cluster init -y -n cluster2" on "hanode2" 17 | Then Cluster service is "started" on "hanode2" 18 | When Run "crm configure primitive vip IPaddr2 params ip=@vip.1" on "hanode2" 19 | 20 | When Run "crm cluster geo_init -y --clusters "cluster1=@vip.0 cluster2=@vip.1" --tickets tickets-geo --arbitrator hanode3" on "hanode1" 21 | When Run "crm cluster geo_join -y --cluster-node hanode1 --clusters "cluster1=@vip.0 cluster2=@vip.1"" on "hanode2" 22 | 23 | Given Service "booth@booth" is "stopped" on "hanode3" 24 | When Run "crm cluster geo_init_arbitrator -y --cluster-node hanode1" on "hanode3" 25 | Then Service "booth@booth" is "started" on "hanode3" 26 | When Run "crm resource start g-booth" on "hanode1" 27 | Then Show cluster status on "hanode1" 28 | When Run "crm resource start g-booth" on "hanode2" 29 | Then Show cluster status on "hanode2" 30 | -------------------------------------------------------------------------------- /test/features/healthcheck.feature: -------------------------------------------------------------------------------- 1 | @healthcheck 2 | Feature: healthcheck detect and fix problems in a crmsh deployment 3 | 4 | Tag @clean means need to stop cluster service if the service is available 5 | Need nodes: hanode1 hanode2 hanode3 6 | 7 | Background: Setup a two nodes cluster 8 | Given Cluster service is "stopped" on "hanode1" 9 | And Cluster service is "stopped" on "hanode2" 10 | And Cluster service is "stopped" on "hanode3" 11 | When Run "crm cluster init -y" on "hanode1" 12 | Then Cluster service is "started" on "hanode1" 13 | And 
Show cluster status on "hanode1" 14 | When Run "crm cluster join -c hanode1 -y" on "hanode2" 15 | Then Cluster service is "started" on "hanode2" 16 | And Online nodes are "hanode1 hanode2" 17 | And Show cluster status on "hanode1" 18 | 19 | @clean 20 | Scenario: An upgrade_seq file in ~hacluster/crmsh/ will be migrated to /var/lib/crmsh (bsc#1213050) 21 | When Run "rm -rf ~hacluster/.ssh" on "hanode1" 22 | And Try "crm cluster health hawk2" on "hanode1" 23 | Then Expected "hawk2: passwordless ssh authentication: FAIL." in stderr 24 | When Run "crm cluster health hawk2 --fix" on "hanode1" 25 | Then Expected "hawk2: passwordless ssh authentication: OK." in stdout 26 | When Run "rm -rf ~hacluster/.ssh /root/.config/crm" on "hanode1" 27 | And Try "crm cluster health hawk2" on "hanode1" 28 | Then Expected "hawk2: passwordless ssh authentication: FAIL." in stderr 29 | When Try "crm cluster health hawk2 --fix" on "hanode1" 30 | Then Expected "Cannot fix automatically" in stderr 31 | -------------------------------------------------------------------------------- /test/features/steps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/test/features/steps/__init__.py -------------------------------------------------------------------------------- /test/history-test.tar.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterLabs/crmsh/0d5733b5625e918d9d3a3f407e710049ae1718d2/test/history-test.tar.bz2 -------------------------------------------------------------------------------- /test/list-undocumented-commands.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # Script to discover and report undocumented commands. 4 | 5 | from crmsh.ui_root import Root 6 | from crmsh import help 7 | 8 | help.HELP_FILE = "doc/crm.8.adoc" 9 | help._load_help() 10 | 11 | _IGNORED_COMMANDS = ('help', 'quit', 'cd', 'up', 'ls') 12 | 13 | 14 | def check_help(ui): 15 | for name, child in ui.children().items(): 16 | if child.type == 'command': 17 | try: 18 | h = help.help_command(ui.name, name) 19 | if h.generated and name not in _IGNORED_COMMANDS: 20 | print("Undocumented: %s %s" % (ui.name, name)) 21 | except: 22 | print("Undocumented: %s %s" % (ui.name, name)) 23 | elif child.type == 'level': 24 | h = help.help_level(name) 25 | if h.generated: 26 | print("Undocumented: %s %s" % (ui.name, name)) 27 | check_help(child.level) 28 | 29 | check_help(Root()) 30 | -------------------------------------------------------------------------------- /test/profile-history.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | case $1 in 4 | cumulative) 5 | python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"cumulative\").print_stats()" | less 6 | ;; 7 | time) 8 | python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\").print_stats()" | less 9 | ;; 10 | timecum) 11 | python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\", \"cum\").print_stats()" | less 12 | ;; 13 | callers) 14 | python -c "import pstats; s = pstats.Stats(\"$2\"); s.print_callers(.5, \"$3\")" | less 15 | ;; 16 | verbose) 17 | PYTHONPATH=. ./crm -X "$2" -H "$3" history log 18 | ;; 19 | *) 20 | PYTHONPATH=. 
./crm -X "$1" -H "$2" history log >/dev/null 21 | ;; 22 | esac 23 | -------------------------------------------------------------------------------- /test/testcases/acl: -------------------------------------------------------------------------------- 1 | show ACL 2 | node node1 3 | property enable-acl=true 4 | primitive d0 ocf:pacemaker:Dummy 5 | primitive d1 ocf:pacemaker:Dummy 6 | role basic-read \ 7 | read status \ 8 | read type:node attribute:uname \ 9 | read type:node attribute:type \ 10 | read property 11 | role basic-read-basic \ 12 | read cib 13 | role d0-admin \ 14 | write meta:d0:target-role \ 15 | write meta:d0:is-managed \ 16 | read ref:d0 17 | role silly-role \ 18 | write meta:d0:target-role \ 19 | write meta:d0:is-managed \ 20 | read ref:d0 \ 21 | read status \ 22 | read type:node attribute:uname \ 23 | read type:node attribute:type \ 24 | read utilization:d0 \ 25 | read property:stonith-enabled \ 26 | write property \ 27 | read node \ 28 | read node:node1 \ 29 | read nodeattr \ 30 | read nodeattr:a1 \ 31 | read nodeutil \ 32 | read nodeutil:node1 \ 33 | read status \ 34 | read cib 35 | role silly-role-two \ 36 | read xpath:"//nodes//attributes" \ 37 | deny tag:nvpair \ 38 | deny ref:d0 39 | acl_target alice \ 40 | basic-read-basic 41 | acl_target bob \ 42 | d0-admin \ 43 | basic-read-basic 44 | role cyrus-role \ 45 | write meta:d0:target-role \ 46 | write meta:d0:is-managed \ 47 | read ref:d0 \ 48 | read status \ 49 | read type:node attribute:uname \ 50 | read type:node attribute:type \ 51 | read property 52 | acl_target cyrus cyrus-role 53 | _test 54 | verify 55 | . 56 | -------------------------------------------------------------------------------- /test/testcases/acl.excl: -------------------------------------------------------------------------------- 1 | INFO: 5: already using schema pacemaker-1.2 2 | -------------------------------------------------------------------------------- /test/testcases/basicset: -------------------------------------------------------------------------------- 1 | confbasic 2 | bundle 3 | confbasic-xml 4 | edit 5 | rset 6 | rset-xml 7 | delete 8 | node 9 | resource 10 | file 11 | shadow 12 | ra 13 | acl 14 | history 15 | newfeatures 16 | commit 17 | bugs 18 | scripts 19 | -------------------------------------------------------------------------------- /test/testcases/bugs: -------------------------------------------------------------------------------- 1 | session Configuration bugs 2 | options 3 | sort-elements false 4 | up 5 | configure 6 | erase 7 | property stonith-enabled=false 8 | primitive p4 Dummy 9 | primitive p3 Dummy 10 | primitive p2 Dummy 11 | primitive p1 Dummy 12 | colocation c1 inf: p1 p2 13 | filter "sed 's/p1 p2/& p3/'" c1 14 | show c1 15 | delete c1 16 | colocation c2 inf: [ p1 p2 ] p3 p4 17 | filter "sed 's/\\\[/\\\(/;s/\\\]/\\\)/'" c2 18 | show c2 19 | primitive p5 Dummy 20 | primitive p6 Dummy 21 | clone cl-p5 p5 22 | show 23 | commit 24 | _test 25 | verify 26 | show 27 | . 28 | session Unordered load file 29 | options 30 | sort-elements false 31 | up 32 | configure 33 | load update bugs-test.txt 34 | show 35 | commit 36 | _test 37 | verify 38 | . 39 | session Unknown properties 40 | configure 41 | erase 42 | property stonith-enabled=false 43 | property SAPHanaSR: \ 44 | hana_ha1_site_lss_WDF1=4 45 | show 46 | commit 47 | _test 48 | verify 49 | property SAPHanaSR_2: \ 50 | hana_ha1_site_iss_WDF1=cde \ 51 | hana_ha1_site_bss_WDF1=abc 52 | show 53 | commit 54 | _test 55 | verify 56 | . 
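(Editor's note — annotation, not part of the test input: the "Unknown
properties" session above exercises named property sets. A line such as
`property SAPHanaSR: hana_ha1_site_lss_WDF1=4` stores the attribute in a
cluster_property_set with id SAPHanaSR instead of rejecting the unknown
name, which is how SAPHanaSR-style agents keep their hana_* attributes.)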
57 | session template 58 | configure 59 | erase 60 | property stonith-enabled=false 61 | node node1 62 | template 63 | new vip virtual-ip params ip=10.10.10.123 64 | load vip 65 | apply update 66 | up 67 | commit 68 | _test 69 | verify 70 | . 71 | -------------------------------------------------------------------------------- /test/testcases/bundle: -------------------------------------------------------------------------------- 1 | show Basic configure 2 | node node1 3 | delete node1 4 | node node1 \ 5 | attributes mem=16G 6 | node node2 utilization cpu=4 7 | bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped 8 | primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped 9 | bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1 10 | _test 11 | verify 12 | . 13 | -------------------------------------------------------------------------------- /test/testcases/bundle.exp: -------------------------------------------------------------------------------- 1 | .TRY Basic configure 2 | .INP: configure 3 | .INP: _regtest on 4 | .INP: erase 5 | .INP: erase nodes 6 | .INP: property stonith-enabled=false 7 | .INP: node node1 8 | .INP: delete node1 9 | .INP: node node1 attributes mem=16G 10 | .INP: node node2 utilization cpu=4 11 | .INP: bundle id=bundle-test1 docker image=test network ip-range-start=10.10.10.123 port-mapping id=port1 port=80 storage storage-mapping id=storage1 target-dir=test source-dir=test meta target-role=Stopped 12 | .INP: primitive id=dummy ocf:heartbeat:Dummy op monitor interval=10 meta target-role=Stopped 13 | .EXT crm_resource --show-metadata ocf:heartbeat:Dummy 14 | .INP: bundle id=bundle-test2 docker image=test network ip-range-start=10.10.10.123 primitive dummy meta target-role=Stopped priority=1 15 | .INP: _test 16 | .INP: verify 17 | .EXT crm_attribute --list-options=cluster --all --output-as=xml 18 | .EXT crm_resource --list-options=primitive --all --output-as=xml 19 | .INP: show 20 | node node1 \ 21 | attributes mem=16G 22 | node node2 \ 23 | utilization cpu=4 24 | primitive dummy Dummy \ 25 | meta target-role=Stopped \ 26 | op monitor interval=10s timeout=20s \ 27 | op start timeout=20s interval=0s \ 28 | op stop timeout=20s interval=0s 29 | bundle bundle-test1 \ 30 | docker image=test \ 31 | network ip-range-start=10.10.10.123 \ 32 | port-mapping port=80 \ 33 | storage \ 34 | storage-mapping target-dir=test source-dir=test \ 35 | meta target-role=Stopped 36 | bundle bundle-test2 \ 37 | docker image=test \ 38 | network ip-range-start=10.10.10.123 \ 39 | primitive dummy \ 40 | meta target-role=Stopped priority=1 41 | property cib-bootstrap-options: \ 42 | stonith-enabled=false 43 | .INP: commit 44 | -------------------------------------------------------------------------------- /test/testcases/commit: -------------------------------------------------------------------------------- 1 | show Commits of all kinds 2 | op_defaults timeout=2m 3 | commit 4 | node node1 \ 5 | attributes mem=16G 6 | primitive p1 ocf:heartbeat:Dummy \ 7 | op monitor interval=60m \ 8 | op monitor interval=120m OCF_CHECK_LEVEL=10 9 | primitive p2 ocf:heartbeat:Dummy 10 | primitive p3 ocf:heartbeat:Dummy 11 | group g1 p1 p2 12 | clone c1 g1 13 | location l1 p3 100: node1 14 | order o1 Mandatory: p3 c1 15 | colocation cl1 inf: c1 p3 16 | primitive d1 
ocf:heartbeat:Dummy 17 | primitive d2 ocf:heartbeat:Dummy 18 | primitive d3 ocf:heartbeat:Dummy 19 | commit 20 | rename p3 pp3 21 | commit 22 | rename pp3 p3 23 | delete c1 24 | commit 25 | group g2 d1 d2 26 | commit 27 | delete g2 28 | commit 29 | filter "sed '/g1/s/p1/d1/'" 30 | group g2 d3 d2 31 | delete d2 32 | commit 33 | _test 34 | verify 35 | . 36 | -------------------------------------------------------------------------------- /test/testcases/common.excl: -------------------------------------------------------------------------------- 1 | Could not send fail-count-p0=\(null\) update via attrd: connection failed 2 | Could not send fail-count-p0= update via attrd: connection failed 3 | Could not send s1=\(null\) update via attrd: connection failed 4 | Could not send s1= update via attrd: connection failed 5 | Error performing operation: The object/attribute does not exist 6 | Error setting fail-count-p0=5 \(section=status, set=status-node1\): The object/attribute does not exist 7 | Error setting s1=1 2 3 \(section=status, set=status-node1\): The object/attribute does not exist 8 | Error signing on to the CRMd service 9 | Error connecting to the controller 10 | Error performing operation: Transport endpoint is not connected 11 | Error performing operation: Not connected 12 | .EXT crm_resource --list-ocf-providers 13 | .EXT crm_resource --list-ocf-alternatives Delay 14 | .EXT crm_resource --list-ocf-alternatives Dummy 15 | ^\.EXT crmd version 16 | ^\.EXT cibadmin \-Q 17 | ^\.EXT crm_verify \-VV \-p 18 | ^\.EXT cibadmin \-p \-P 19 | ^\.EXT crm_diff \-\-help 20 | ^\.EXT crm_diff \-o [^ ]+ \-n \- 21 | ^\.EXT crm_diff \-\-no\-version \-o [^ ]+ \-n \- 22 | ^\.EXT sed ['][^']+ 23 | ^\.EXT sed ["][^"]+ 24 | ^\.EXT [a-zA-Z]+ validate-all 25 | ^[ ]+File ["][^"]+ 26 | \(cluster\_status\) warning\: Fencing and resource management disabled due to lack of quorum 27 | not fencing unseen nodes 28 | -------------------------------------------------------------------------------- /test/testcases/common.filter: -------------------------------------------------------------------------------- 1 | #!/usr/bin/awk -f 2 | # 1. 
replace .EXT [path/] with .EXT 3 | /\.EXT \/(.+)/ { gsub(/\/.*\//, "", $2) } 4 | /\.EXT >\/dev\/null 2>&1 \/(.+)/ { gsub(/\/.*\//, "", $4) } 5 | /\.EXT pacemaker-fenced/ { gsub(/pacemaker-fenced/,"stonithd") } 6 | /\.EXT pacemaker-controld/ { gsub(/pacemaker-controld/,"crmd") } 7 | /\.EXT pacemaker-schedulerd/ { gsub(/pacemaker-schedulerd/,"pengine") } 8 | /\.EXT pacemaker-based/ { gsub(/pacemaker-based/,"cib") } 9 | { print } 10 | -------------------------------------------------------------------------------- /test/testcases/confbasic-xml: -------------------------------------------------------------------------------- 1 | showxml Basic configure (xml dump) 2 | node node1 3 | delete node1 4 | node node1 \ 5 | attributes mem=16G 6 | node node2 utilization cpu=4 7 | primitive d1 ocf:pacemaker:Dummy \ 8 | operations $id=d1-ops \ 9 | op monitor interval=60m \ 10 | op monitor interval=120m OCF_CHECK_LEVEL=10 11 | monitor d1 60s:30s 12 | primitive d2 ocf:heartbeat:Delay \ 13 | params mondelay=60 \ 14 | op start timeout=60s \ 15 | op stop timeout=60s 16 | monitor d2:Started 60s:30s 17 | group g1 d1 d2 18 | primitive d3 ocf:pacemaker:Dummy 19 | clone c d3 \ 20 | meta clone-max=1 21 | primitive d4 ocf:pacemaker:Dummy 22 | clone m d4 meta promotable=true 23 | primitive s5 ocf:pacemaker:Stateful \ 24 | operations $id-ref=d1-ops 25 | primitive s6 ocf:pacemaker:Stateful \ 26 | operations $id-ref=d1 27 | clone m5 s5 meta promotable=true 28 | clone m6 s6 meta promotable=true 29 | location l1 g1 100: node1 30 | location l2 c \ 31 | rule $id=l2-rule1 100: #uname eq node1 32 | location l3 m5 \ 33 | rule inf: #uname eq node1 and pingd gt 0 34 | location l4 m5 \ 35 | rule -inf: not_defined pingd or pingd lte 0 36 | location l5 m5 \ 37 | rule -inf: not_defined pingd or pingd lte 0 38 | location l8 m5 \ 39 | rule inf: #uname eq node1 and \ 40 | pingd gt 0 and \ 41 | date lt 2009-05-26 and \ 42 | date in start=2009-05-26 end=2009-07-26 and \ 43 | date in start=2009-05-26 years=2009 and \ 44 | date spec years=2009 hours=09-17 45 | location l6 m5 \ 46 | rule $id-ref=l2-rule1 47 | location l7 m5 \ 48 | rule $id-ref=l2 49 | colocation c1 inf: m6 m5 50 | colocation c2 inf: m5:Promoted d1:Started 51 | order o1 Mandatory: m5 m6 52 | order o2 Optional: d1:start m5:promote 53 | order o3 Serialize: m5 m6 54 | order o4 Mandatory: m5 m6 55 | rsc_ticket ticket-A_m6 ticket-A: m6 56 | rsc_ticket ticket-B_m6_m5 ticket-B: m6 m5 loss-policy=stop 57 | rsc_ticket ticket-C_master ticket-C: m6 m5:Promoted loss-policy=stop 58 | property $id=cpset2 maintenance-mode=true 59 | rsc_defaults failure-timeout=10m 60 | op_defaults $id=opsdef2 record-pending=true 61 | _test 62 | verify 63 | . 
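The awk filter in common.filter above normalizes live output before it is diffed against the recorded .exp files: it strips directory components from .EXT command lines and rewrites the Pacemaker 2.x daemon names back to their historical names so older expected outputs keep matching. A rough Python equivalent of that normalization, offered only as an illustrative sketch (the harness really runs the awk script):

import re
import sys

# Daemon renames mirrored from common.filter's gsub rules.
RENAMES = {
    "pacemaker-fenced": "stonithd",
    "pacemaker-controld": "crmd",
    "pacemaker-schedulerd": "pengine",
    "pacemaker-based": "cib",
}

for line in sys.stdin:
    if line.startswith(".EXT"):
        # ".EXT /usr/sbin/crm_verify ..." -> ".EXT crm_verify ..."
        line = re.sub(r"/[^ ]*/", "", line, count=1)
    for new, old in RENAMES.items():
        line = line.replace(new, old)
    sys.stdout.write(line)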
64 | -------------------------------------------------------------------------------- /test/testcases/confbasic-xml.filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | grep -v "WARNING" 3 | -------------------------------------------------------------------------------- /test/testcases/delete: -------------------------------------------------------------------------------- 1 | session Delete/Rename test 2 | configure 3 | # erase to start from scratch 4 | erase 5 | erase nodes 6 | property stonith-enabled=false 7 | node node1 8 | primitive d1 ocf:pacemaker:Dummy 9 | primitive d2 ocf:pacemaker:Dummy 10 | location d1-pref d1 100: node1 11 | show 12 | _test 13 | rename d1 p1 14 | show 15 | # delete primitive 16 | delete d2 17 | _test 18 | show 19 | # delete primitive with constraint 20 | delete p1 21 | _test 22 | show 23 | primitive d1 ocf:pacemaker:Dummy 24 | location d1-pref d1 100: node1 25 | _test 26 | # delete primitive belonging to a group 27 | primitive d2 ocf:pacemaker:Dummy 28 | _test 29 | group g1 d2 d1 30 | delete d2 31 | show 32 | _test 33 | delete g1 34 | show 35 | verify 36 | # delete a group which is in a clone 37 | primitive d2 ocf:pacemaker:Dummy 38 | group g1 d2 d1 39 | clone c1 g1 40 | delete g1 41 | show 42 | _test 43 | group g1 d2 d1 44 | clone c1 g1 45 | _test 46 | # delete group from a clone (again) 47 | delete g1 48 | show 49 | _test 50 | group g1 d2 d1 51 | clone c1 g1 52 | # delete primitive and its group and their clone 53 | delete d2 d1 c1 g1 54 | show 55 | _test 56 | # verify 57 | verify 58 | commit 59 | . 60 | -------------------------------------------------------------------------------- /test/testcases/edit.excl: -------------------------------------------------------------------------------- 1 | ^\.EXT sed \-[re] ['][^'] 2 | -------------------------------------------------------------------------------- /test/testcases/file: -------------------------------------------------------------------------------- 1 | configure save sample.txt 2 | %ext cat sample.txt 3 | configure erase nodes 4 | configure load replace sample.txt 5 | %ext sed -i 's/60s/2m/' sample.txt 6 | %ext sed -i '8a # comment' sample.txt 7 | session Load update 8 | configure 9 | delete m1 p1 10 | property cluster-recheck-interval="10m" 11 | load update sample.txt 12 | . 
13 | configure show 14 | %ext rm sample.txt 15 | -------------------------------------------------------------------------------- /test/testcases/history: -------------------------------------------------------------------------------- 1 | session History 2 | history 3 | source history-test.tar.bz2 4 | info 5 | events 6 | node 15sp1-1 7 | node 15sp1-2 8 | node .* 9 | exclude pcmk_peer_update 10 | exclude 11 | node 15sp1-2 12 | exclude clear 13 | exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd 14 | exclude clear 15 | peinputs 16 | peinputs v 17 | transitions 18 | refresh 19 | resource d1 20 | # reduce report span 21 | timeframe "2019-03-22 15:07:37" 22 | peinputs 23 | resource d1 24 | exclude corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd 25 | transition log 26 | transition nograph 27 | transition -1 nograph 28 | transition save 0 _crmsh_regtest 29 | transition log 49 30 | transition tags 49 31 | # reset timeframe 32 | timeframe 33 | session save _crmsh_regtest 34 | session load _crmsh_regtest 35 | session 36 | session pack 37 | . 38 | session History 2 39 | history 40 | session load _crmsh_regtest 41 | exclude 42 | . 43 | -------------------------------------------------------------------------------- /test/testcases/history.excl: -------------------------------------------------------------------------------- 1 | ^ptest.*: 2 | ^\.EXT tar -C ['][^']+['] -cj -f ['][^']+['] _crmsh_regtest 3 | ^Report saved in ['][^']+ 4 | -------------------------------------------------------------------------------- /test/testcases/history.post: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | crm history session delete _crmsh_regtest 3 | rm -r history-test 4 | -------------------------------------------------------------------------------- /test/testcases/history.pre: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | crm history session delete _crmsh_regtest 3 | rm -rf history-test 4 | -------------------------------------------------------------------------------- /test/testcases/newfeatures: -------------------------------------------------------------------------------- 1 | session New features 2 | configure 3 | # erase to start from scratch 4 | erase 5 | erase nodes 6 | property stonith-enabled=false 7 | node node1 8 | primitive p0 Dummy params $p0-state:state=1 9 | primitive p1 Dummy params \ 10 | rule role=Started date in start=2009-05-26 end=2010-05-26 or date gt 2014-01-01 \ 11 | state=2 12 | primitive p2 Dummy params @p0-state 13 | tag tag1: p0 p1 p2 14 | tag tag2 p0 p1 p2 15 | location l1 { p0 p1 p2 } inf: node1 16 | primitive node1 Dummy 17 | tag ones l1 p1 18 | alert notify_9 /usr/share/pacemaker/alerts/alert_snmp.sh \ 19 | attributes \ 20 | trap_add_hires_timestamp_oid="false" \ 21 | trap_node_states="non-trap" \ 22 | trap_resource_tasks="start,stop,monitor,promote,demote" \ 23 | to "192.168.40.9" 24 | alert notify_10 /usr/share/pacemaker/alerts/alert_snmp.sh \ 25 | attributes \ 26 | trap_add_hires_timestamp_oid="false" \ 27 | select attributes { master-prmStateful test1 } \ 28 | to 192.168.28.188 29 | alert notify_11 /usr/share/pacemaker/alerts/alert_snmp.sh \ 30 | select fencing nodes resources \ 31 | to 192.168.28.188 32 | show tag:ones and type:location 33 | show tag:ones and p1 34 | show 35 | _test 36 | verify 37 | commit 38 | . 
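The history testcase above drives crmsh's log exploration against a canned report (history-test.tar.bz2), and its exclude command takes an extended regular expression that hides matching log lines from subsequent output. A small illustrative sketch of that kind of line filtering in Python (the sample log lines here are hypothetical; the real implementation lives in crmsh's history and logparser modules):

import re

# The same extended regex used with "exclude" in the history testcase above.
exclude = re.compile(
    r"corosync|pacemaker-based|pacemaker-fenced|pacemaker-execd"
    r"|pacemaker-attrd|pacemaker-schedulerd|pacemaker-controld|sshd")

log_lines = [  # hypothetical sample input
    "Jan 03 11:03:31 node1 pacemaker-fenced [1944] info: node 2 joined",
    "Jan 03 11:03:32 node1 crmsh: INFO: something interesting",
]
for line in log_lines:
    if not exclude.search(line):
        print(line)  # only lines that match no excluded daemon survive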
39 | -------------------------------------------------------------------------------- /test/testcases/node: -------------------------------------------------------------------------------- 1 | node show 2 | node show node1 3 | %setenv showobj=node1 4 | configure primitive p5 Dummy 5 | configure group g0 p5 6 | resource maintenance g0 7 | resource maintenance p5 8 | -F node maintenance node1 9 | node ready node1 10 | node attribute node1 set a1 "1 2 3" 11 | node attribute node1 show a1 12 | node attribute node1 delete a1 13 | node clearstate node1 14 | 15 | -------------------------------------------------------------------------------- /test/testcases/options: -------------------------------------------------------------------------------- 1 | session Options 2 | options 3 | reset 4 | pager cat 5 | editor vi 6 | show 7 | check-frequency never 8 | check-mode nosuchever 9 | colorscheme normal,yellow,cyan,red,green,magenta 10 | colorscheme normal,yellow,cyan,red 11 | pager nosuchprogram 12 | skill-level operator 13 | skill-level joe 14 | skill-level expert 15 | output plain 16 | output misplain 17 | wait true 18 | wait off 19 | wait happy 20 | show 21 | save 22 | . 23 | options show 24 | -------------------------------------------------------------------------------- /test/testcases/options.exp: -------------------------------------------------------------------------------- 1 | .TRY Options 2 | .INP: options 3 | .INP: reset 4 | .INP: pager cat 5 | .INP: editor vi 6 | .INP: show 7 | editor "vi" 8 | pager "cat" 9 | user "" 10 | skill-level "expert" 11 | output "color" 12 | colorscheme "yellow,normal,cyan,red,green,magenta" 13 | sort-elements "yes" 14 | check-frequency "always" 15 | check-mode "strict" 16 | wait "no" 17 | add-quotes "yes" 18 | manage-children "ask" 19 | .INP: check-frequency never 20 | .INP: check-mode nosuchever 21 | ERROR: nosuchever not valid (choose one from strict,relaxed) 22 | .INP: colorscheme normal,yellow,cyan,red,green,magenta 23 | .INP: colorscheme normal,yellow,cyan,red 24 | ERROR: bad color scheme: normal,yellow,cyan,red 25 | .INP: pager nosuchprogram 26 | ERROR: nosuchprogram does not exist or is not a program 27 | .INP: skill-level operator 28 | .INP: skill-level joe 29 | ERROR: joe not valid (choose one from operator,administrator,expert) 30 | .INP: skill-level expert 31 | .INP: output plain 32 | .INP: output misplain 33 | ERROR: misplain not valid (choose one from plain,color,uppercase) 34 | .INP: wait true 35 | .INP: wait off 36 | .INP: wait happy 37 | ERROR: happy not valid (yes or no are valid) 38 | .INP: show 39 | editor "vi" 40 | pager "cat" 41 | user "" 42 | skill-level "expert" 43 | output "plain" 44 | colorscheme "normal,yellow,cyan,red,green,magenta" 45 | sort-elements "yes" 46 | check-frequency "never" 47 | check-mode "strict" 48 | wait "off" 49 | add-quotes "yes" 50 | manage-children "ask" 51 | .INP: save 52 | .TRY options show 53 | editor "vi" 54 | pager "cat" 55 | user "" 56 | skill-level "expert" 57 | output "plain" 58 | colorscheme "normal,yellow,cyan,red,green,magenta" 59 | sort-elements "yes" 60 | check-frequency "never" 61 | check-mode "strict" 62 | wait "off" 63 | add-quotes "yes" 64 | manage-children "ask" 65 | -------------------------------------------------------------------------------- /test/testcases/ra: -------------------------------------------------------------------------------- 1 | session RA interface 2 | ra 3 | providers IPaddr2 4 | providers Dummy 5 | info ocf:pacemaker:Dummy 6 | . 
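The expected output that follows shows how `crm ra info` is satisfied: crmsh shells out to crm_resource --show-metadata (visible as the .EXT line in ra.exp) and renders the agent's XML metadata as the parameter and operations listing. A minimal sketch of that call, for illustration only (the real code path is in crmsh/ra.py; this assumes crm_resource is on PATH):

import subprocess

def show_metadata(agent="ocf:pacemaker:Dummy"):
    # Fetch the raw metadata XML for an agent, as recorded by the
    # ".EXT crm_resource --show-metadata" line in ra.exp below.
    result = subprocess.run(
        ["crm_resource", "--show-metadata", agent],
        capture_output=True, text=True, check=True)
    return result.stdout  # XML document describing parameters and actions

if __name__ == "__main__":
    print(show_metadata()[:200])  # peek at the start of the XML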
7 | -------------------------------------------------------------------------------- /test/testcases/ra.exp: -------------------------------------------------------------------------------- 1 | .TRY RA interface 2 | .INP: ra 3 | .INP: providers IPaddr2 4 | 5 | heartbeat 6 | .INP: providers Dummy 7 | heartbeat pacemaker 8 | .INP: info ocf:pacemaker:Dummy 9 | .EXT crm_resource --show-metadata ocf:pacemaker:Dummy 10 | ocf:pacemaker:Dummy - Example stateless resource agent 11 | 12 | This is a dummy OCF resource agent. It does absolutely nothing except keep track 13 | of whether it is running or not, and can be configured so that actions fail or 14 | take a long time. Its purpose is primarily for testing, and to serve as a 15 | template for resource agent writers. 16 | 17 | ## Parameters (*: required, []: default): 18 | 19 | envfile (string): Environment dump file 20 | If this is set, the environment will be dumped to this file for every call. 21 | 22 | fail_start_on (string): Report bogus start failure on specified host 23 | Start, migrate_from, and reload-agent actions will return failure if running on 24 | the host specified here, but the resource will run successfully anyway (future 25 | monitor calls will find it running). This can be used to test on-fail=ignore. 26 | 27 | fake (string, [dummy]): Fake attribute that can be changed to cause an agent reload 28 | Fake attribute that can be changed to cause an agent reload 29 | 30 | op_sleep (string, [0]): Operation sleep duration in seconds. 31 | Number of seconds to sleep during operations. This can be used to test how 32 | the cluster reacts to operation timeouts. 33 | 34 | passwd (string): Password 35 | Fake password field 36 | 37 | state (string, [state-file]): State file 38 | Location to store the resource state in. 
39 | 40 | ## Operations' defaults (advisory minimum): 41 | 42 | start timeout=20s 43 | stop timeout=20s 44 | monitor timeout=20s interval=10s depth=0 45 | reload timeout=20s 46 | reload-agent timeout=20s 47 | migrate_to timeout=20s 48 | migrate_from timeout=20s 49 | -------------------------------------------------------------------------------- /test/testcases/ra.filter: -------------------------------------------------------------------------------- 1 | #!/usr/bin/awk -f 2 | # reduce the providers list to heartbeat and pacemaker 3 | # (prevents other providers creeping in) 4 | function reduce(a) { 5 | a["heartbeat"]=1; a["pacemaker"]=1; 6 | s=""; 7 | for( i=1; i<=NF; i++ ) 8 | if( $i in a ) 9 | s=s" "$i; 10 | return substr(s,2); 11 | } 12 | n==1 { n=0; print reduce(a); next; } 13 | /providers IPaddr/ { n=1; } 14 | /providers Dummy/ { n=1; } 15 | /^state \(string, \[(.*)\]\):/ { gsub(/\[.*\]/, "[state-file]") } 16 | { print } 17 | -------------------------------------------------------------------------------- /test/testcases/resource: -------------------------------------------------------------------------------- 1 | resource status p0 2 | %setenv showobj=p3 3 | resource start p3 4 | resource stop p3 5 | %setenv showobj=c1 6 | resource manage c1 7 | resource unmanage c1 8 | %setenv showobj=p2 9 | resource maintenance p2 on 10 | resource maintenance p2 off 11 | %setenv showobj=cli-prefer-p3 12 | resource migrate p3 node1 13 | %setenv showobj= 14 | resource unmigrate p3 15 | %setenv showobj=cli-prefer-p3 16 | resource migrate p3 node1 force 17 | %setenv showobj= 18 | resource unmigrate p3 19 | %setenv showobj=p0 20 | resource param p0 set a0 "1 2 3" 21 | resource param p0 show a0 22 | resource param p0 delete a0 23 | resource meta p0 set m0 123 24 | resource meta p0 show m0 25 | resource meta p0 delete m0 26 | resource trace p0 probe 27 | resource trace p0 start 28 | resource trace p0 stop 29 | resource untrace p0 probe 30 | resource untrace p0 start 31 | resource untrace p0 stop 32 | configure group g p0 p3 33 | options manage-children never 34 | resource start g 35 | resource start p0 36 | resource stop g 37 | configure clone cg g 38 | options manage-children always 39 | resource start g 40 | resource stop g 41 | resource start cg 42 | resource stop p0 43 | resource start cg 44 | resource stop cg 45 | resource stop p3 46 | %setenv showobj= 47 | configure rename p3 p4 48 | configure primitive p3 Dummy 49 | resource stop p3 50 | resource start p3 51 | resource stop p3 52 | configure rm cg 53 | configure clone msg g meta promotable=true 54 | resource scores 55 | %setenv showobj= 56 | configure primitive p5 Dummy 57 | configure group g1 p5 58 | resource manage p5 59 | %setenv showobj=p5 60 | -F resource maintenance p5 on 61 | %setenv showobj=p5 62 | -F resource unmanage p5 63 | %setenv showobj=p5 64 | -F resource maintenance g1 65 | resource start p5 66 | %setenv showobj=g1 67 | -F resource manage g1 68 | resource start p5 69 | %setenv showobj=p5 70 | -F resource maintenance p5 on 71 | %setenv showobj=g1 72 | -F resource maintenance g1 73 | -------------------------------------------------------------------------------- /test/testcases/rset: -------------------------------------------------------------------------------- 1 | show Resource sets 2 | node node1 3 | primitive d1 ocf:pacemaker:Dummy 4 | primitive d2 ocf:heartbeat:Dummy 5 | primitive d3 ocf:heartbeat:Dummy 6 | primitive d4 ocf:heartbeat:Dummy 7 | primitive d5 ocf:heartbeat:Dummy 8 | order o1 Serialize: d1 d2 ( d3 d4 ) 9 | 
colocation c1 inf: d4 ( d1 d2 d3 ) 10 | colocation c2 inf: d1 d2 d3 d4 11 | colocation c3 inf: ( d3 d4 ) ( d1 d2 ) 12 | delete d2 13 | show o1 c1 c2 c3 14 | delete d4 15 | show o1 c1 c2 c3 16 | _test 17 | verify 18 | . 19 | -------------------------------------------------------------------------------- /test/testcases/rset-xml: -------------------------------------------------------------------------------- 1 | showxml Resource sets 2 | node node1 3 | primitive d1 ocf:pacemaker:Dummy 4 | primitive d2 ocf:heartbeat:Dummy 5 | primitive d3 ocf:heartbeat:Dummy 6 | primitive d4 ocf:heartbeat:Dummy 7 | primitive d5 ocf:heartbeat:Dummy 8 | order o1 Serialize: d1 d2 ( d3 d4 ) 9 | colocation c1 inf: d4 ( d1 d2 d3 ) 10 | colocation c2 inf: d1 d2 d3 d4 11 | colocation c3 inf: ( d3 d4 ) ( d1 d2 ) 12 | delete d2 13 | delete d4 14 | _test 15 | verify 16 | . 17 | -------------------------------------------------------------------------------- /test/testcases/rset-xml.exp: -------------------------------------------------------------------------------- (expected CIB XML dump, 48 lines; the XML markup was stripped when this document was extracted, leaving only empty numbered lines, so the content is elided here) -------------------------------------------------------------------------------- /test/testcases/rset.exp: -------------------------------------------------------------------------------- 1 | .TRY Resource sets 2 | .INP: configure 3 | .INP: _regtest on 4 | .INP: erase 5 | .INP: erase nodes 6 | .INP: property stonith-enabled=false 7 | .INP: node node1 8 | .INP: primitive d1 ocf:pacemaker:Dummy 9 | .EXT crm_resource --show-metadata ocf:pacemaker:Dummy 10 | .INP: primitive d2 ocf:heartbeat:Dummy 11 | .EXT crm_resource --show-metadata ocf:heartbeat:Dummy 12 | .INP: primitive d3 ocf:heartbeat:Dummy 13 | .INP: primitive d4 ocf:heartbeat:Dummy 14 | .INP: primitive d5 ocf:heartbeat:Dummy 15 | .INP: order o1 Serialize: d1 d2 ( d3 d4 ) 16 | .INP: colocation c1 inf: d4 ( d1 d2 d3 ) 17 | .INP: colocation c2 inf: d1 d2 d3 d4 18 | .INP: colocation c3 inf: ( d3 d4 ) ( d1 d2 ) 19 | .INP: delete d2 20 | INFO: 16: constraint order:o1 updated 21 | INFO: 16: constraint colocation:c1 updated 22 | INFO: 16: constraint colocation:c2 updated 23 | INFO: 16: constraint colocation:c3 updated 24 | .INP: show o1 c1 c2 c3 25 | colocation c1 inf: d4 ( d1 d3 ) 26 | colocation c2 inf: d1 d3 d4 27 | colocation c3 inf: ( d3 d4 ) ( d1 ) 28 | order o1 Serialize: d1 ( d3 d4 ) 29 | .INP: delete d4 30 | INFO: 18: constraint order:o1 updated 31 | INFO: 18: constraint colocation:c1 updated 32 | INFO: 18: constraint colocation:c2 updated 33 | INFO: 18: constraint colocation:c3 updated 34 | .INP: show o1 c1 c2 c3 35 | colocation c1 inf: ( d1 d3 ) 36 | colocation c2 inf: d3 d1 37 | colocation c3 inf: d3 d1 38 | order o1 Serialize: d1 d3 39 | .INP: _test 40 | .INP: verify 41 | .EXT crm_attribute --list-options=cluster --all --output-as=xml 42 | .EXT crm_resource --list-options=primitive --all --output-as=xml 43 | .INP: show 44 | node node1 45 | primitive d1 ocf:pacemaker:Dummy \ 46 | op monitor timeout=20s interval=10s \ 47 | op start timeout=20s interval=0s \ 48 | op stop timeout=20s interval=0s 49 | primitive d3 Dummy \ 50 | op monitor timeout=20s interval=10s \ 51 | op start timeout=20s interval=0s \ 52 | op stop timeout=20s interval=0s 53 | primitive d5 Dummy \ 54 | op monitor timeout=20s interval=10s \ 55 | op start timeout=20s interval=0s \ 56 | op stop timeout=20s
interval=0s 57 | colocation c1 inf: ( d1 d3 ) 58 | colocation c2 inf: d3 d1 59 | colocation c3 inf: d3 d1 60 | order o1 Serialize: d1 d3 61 | property cib-bootstrap-options: \ 62 | stonith-enabled=false 63 | .INP: commit 64 | -------------------------------------------------------------------------------- /test/testcases/scripts: -------------------------------------------------------------------------------- 1 | session Cluster scripts 2 | script 3 | list 4 | list all 5 | list names 6 | list names all 7 | list all names 8 | show mailto 9 | verify mailto id=foo email=test@example.com subject=hello 10 | run mailto id=foo email=test@example.com subject=hello nodes=node1 dry_run=true 11 | json '["show", "mailto"]' 12 | json '["verify", "mailto", {"id":"foo", "email":"test@example.com", "subject":"hello"}]' 13 | . 14 | -------------------------------------------------------------------------------- /test/testcases/scripts.filter: -------------------------------------------------------------------------------- 1 | #!/usr/bin/awk -f 2 | # mask the randomly named temporary file in 'configure load update' output 3 | /\*\* localhost - crm --wait --no configure load update (\/tmp\/crm-tmp-.+)/ { gsub(/.*/, "<>", $NF) } 4 | { print } 5 | -------------------------------------------------------------------------------- /test/testcases/shadow: -------------------------------------------------------------------------------- 1 | filesession Shadow CIB management 2 | cib 3 | new regtest force 4 | reset regtest 5 | use regtest 6 | commit regtest 7 | delete regtest 8 | use 9 | delete regtest 10 | . 11 | -------------------------------------------------------------------------------- /test/testcases/shadow.exp: -------------------------------------------------------------------------------- 1 | .TRY Shadow CIB management 2 | .INP: cib 3 | .INP: new regtest force 4 | (the remaining expected output, which records crm_shadow invocations and their shell redirections, was garbled when this document was extracted) -------------------------------------------------------------------------------- /test/unittests/pacemaker.log.2: -------------------------------------------------------------------------------- 1 | Jan 03 11:03:31 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 2 joined 2 | Jan 03 11:03:41 15sp1-1 pacemaker-fenced [1944] (pcmk_cpg_membership) info: Group event stonith-ng.3: node 1 (15sp1-1) is member 3 | Jan 03 11:03:51 15sp1-1 pacemaker-fenced [1944] (corosync_node_name) info: Unable to get node name for nodeid 2 4 | -------------------------------------------------------------------------------- /test/unittests/schemas/acls-1.1.rng: -------------------------------------------------------------------------------- (RelaxNG grammar for CIB ACLs as of pacemaker-1.1, 67 lines; the XML markup was stripped when this document was extracted) -------------------------------------------------------------------------------- /test/unittests/schemas/acls-1.2.rng: -------------------------------------------------------------------------------- (RelaxNG grammar for CIB ACLs as of pacemaker-1.2, 67 lines; the XML markup was stripped when this document was extracted)
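The .rng fragments in this directory are pieces of the CIB schema that the unit tests load; with lxml, which the unit tests further down already depend on, validating a document against a RelaxNG schema looks roughly like the sketch below. Both file names here are hypothetical, and note that grammar fragments like these normally have to be pulled in by a top-level schema before they can validate anything on their own:

from lxml import etree

# Hypothetical names: a standalone top-level schema and a CIB snippet.
schema = etree.RelaxNG(etree.parse("pacemaker-top-level.rng"))
doc = etree.parse("cib-snippet.xml")
if not schema.validate(doc):
    print(schema.error_log)  # reports which element failed to match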
-------------------------------------------------------------------------------- /test/unittests/schemas/fencing.rng: -------------------------------------------------------------------------------- (RelaxNG grammar for the fencing-topology section, 30 lines; the XML markup was stripped when this document was extracted — the only surviving content is the device name-list pattern ([a-zA-Z0-9_\.\-]+)(,[a-zA-Z0-9_\.\-]+)*) -------------------------------------------------------------------------------- /test/unittests/schemas/nvset.rng: -------------------------------------------------------------------------------- (RelaxNG grammar for nvpair sets, 36 lines; the XML markup was stripped when this document was extracted and no text content survives) -------------------------------------------------------------------------------- /test/unittests/schemas/score.rng: -------------------------------------------------------------------------------- (RelaxNG grammar for score values, 19 lines; the XML markup was stripped when this document was extracted — the surviving enumerated values are INFINITY, +INFINITY and -INFINITY) -------------------------------------------------------------------------------- /test/unittests/schemas/versions.rng: -------------------------------------------------------------------------------- (RelaxNG grammar for the validate-with versions, 25 lines; the XML markup was stripped when this document was extracted — the surviving enumerated values are none, pacemaker-0.6, transitional-0.6, pacemaker-0.7, pacemaker-1.0, pacemaker-1.1 and pacemaker-1.2) -------------------------------------------------------------------------------- /test/unittests/scripts/inc1/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | shortdesc: Include test script 1 3 | longdesc: Test if includes work ok 4 | parameters: 5 | - name: foo 6 | type: boolean 7 | shortdesc: An optional feature 8 | - name: bar 9 | type: string 10 | shortdesc: A string of characters 11 | value: the name is the game 12 | - name: is-required 13 | type: int 14 | required: true 15 | actions: 16 | - call: ls /tmp 17 | when: foo 18 | shortdesc: ls 19 | - call: "echo '{{foo}}'" 20 | shortdesc: foo 21 | - call: "echo '{{bar}}'" 22 | shortdesc: bar -------------------------------------------------------------------------------- /test/unittests/scripts/inc2/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - version: 2.2 3 | shortdesc: Includes another script 4 | longdesc: This one includes another script 5 | parameters: 6 | - name: wiz 7 | type: string 8 | - name: foo 9 | type: boolean 10 | shortdesc: A different foo 11 | include: 12 | - script: inc1 13 | name: included-script 14 | parameters: 15 | - name: is-required 16 | value: 33 17 | actions: 18 | - call: "echo 'before {{wiz}}'" 19 | shortdesc: before wiz 20 | - include: included-script 21 | - call: "echo 'after {{foo}}'" 22 | shortdesc: after foo 23 | - cib: | 24 | {{included-script:is-required}} 25 | - cib: | 26 | {{wiz}} -------------------------------------------------------------------------------- /test/unittests/scripts/legacy/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Initialize a new cluster 3 | description: > 4 | Initializes a new cluster on the nodes provided. Will try to 5 | configure SSH if not already configured, and install missing 6 | packages. 7 | 8 | A more user-friendly interface to this script is provided by the 9 | cluster init command.
10 | parameters: 11 | - name: iface 12 | description: "Use the given interface. Try to auto-detect interface by default." 13 | default: "" 14 | 15 | - name: transport 16 | description: "Corosync transport (mcast or udpu)" 17 | default: "udpu" 18 | 19 | - name: bindnetaddr 20 | description: "Network address to bind to (e.g.: 192.168.1.0)" 21 | default: "" 22 | 23 | - name: mcastaddr 24 | description: "Multicast address (e.g.: 239.x.x.x)" 25 | default: "" 26 | 27 | - name: mcastport 28 | description: "Multicast port" 29 | default: 5405 30 | 31 | steps: 32 | - name: Configure SSH 33 | apply_local: configure.py ssh 34 | 35 | - name: Check state of nodes 36 | collect: collect.py 37 | 38 | - name: Verify parameters 39 | validate: verify.py 40 | 41 | - name: Install packages 42 | apply: configure.py install 43 | 44 | - name: Generate corosync authkey 45 | apply_local: authkey.py 46 | 47 | - name: Configure cluster nodes 48 | apply: configure.py corosync 49 | 50 | - name: Initialize cluster 51 | apply_local: init.py 52 | 53 | -------------------------------------------------------------------------------- /test/unittests/scripts/templates/apache.xml: -------------------------------------------------------------------------------- (crmsh configuration template rendered as XML, 37 lines; the XML markup was stripped when this document was extracted) -------------------------------------------------------------------------------- /test/unittests/scripts/templates/virtual-ip.xml: -------------------------------------------------------------------------------- (crmsh configuration template rendered as XML, 63 lines; the XML markup was stripped when this document was extracted) -------------------------------------------------------------------------------- /test/unittests/scripts/unified/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | shortdesc: Unified Script 3 | longdesc: > 4 | Test if we can define multiple steps in a single script 5 | category: test 6 | steps: 7 | - parameters: 8 | - name: id 9 | type: resource 10 | required: true 11 | shortdesc: Identifier 12 | - name: vip 13 | shortdesc: Configure the virtual IP 14 | parameters: 15 | - name: id 16 | type: resource 17 | required: true 18 | shortdesc: IP Identifier 19 | - name: ip 20 | type: ip_address 21 | required: true 22 | shortdesc: The IP Address 23 | actions: 24 | - cib: | 25 | primitive {{vip:id}} IPaddr2 ip={{vip:ip}} 26 | group g-{{id}} {{id}} {{vip:id}} -------------------------------------------------------------------------------- /test/unittests/scripts/v2/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - version: 2.2 3 | shortdesc: Apache Webserver 4 | longdesc: > 5 | Configure a resource group containing a virtual IP address and 6 | an instance of the Apache web server. 7 | category: Server 8 | parameters: 9 | - name: id 10 | shortdesc: The ID specified here is for the web server resource group.
11 | - name: install 12 | type: boolean 13 | value: true 14 | shortdesc: Disable if no installation should be performed 15 | include: 16 | - agent: test:apache 17 | parameters: 18 | - name: id 19 | value: "{{id}}-server" 20 | - name: configfile 21 | type: file 22 | ops: | 23 | op monitor interval=20s timeout=20s 24 | - agent: test:virtual-ip 25 | name: virtual-ip 26 | parameters: 27 | - name: id 28 | value: "{{id}}-ip" 29 | - name: ip 30 | type: ip_address 31 | ops: | 32 | op monitor interval=20s timeout=20s 33 | actions: 34 | - install: 35 | - apache2 36 | when: install 37 | - call: a2enable mod_status 38 | shortdesc: Enable status module 39 | nodes: all 40 | when: install 41 | - cib: | 42 | {{virtual-ip}} 43 | {{apache}} 44 | group {{id}} 45 | {{virtual-ip:id}} 46 | {{apache:id}} -------------------------------------------------------------------------------- /test/unittests/scripts/vip/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - version: 2.2 3 | shortdesc: Virtual IP 4 | category: Basic 5 | include: 6 | - agent: test:virtual-ip 7 | name: virtual-ip 8 | parameters: 9 | - name: id 10 | type: resource 11 | required: true 12 | - name: ip 13 | type: ip_address 14 | required: true 15 | - name: cidr_netmask 16 | type: integer 17 | required: false 18 | - name: broadcast 19 | type: ipaddress 20 | required: false 21 | - name: lvs_support 22 | required: false 23 | type: boolean 24 | ops: | 25 | op start timeout="20" op stop timeout="20" 26 | op monitor interval="10" timeout="20" 27 | actions: 28 | - include: virtual-ip -------------------------------------------------------------------------------- /test/unittests/scripts/vipinc/main.yml: -------------------------------------------------------------------------------- 1 | version: 2.2 2 | category: Test 3 | shortdesc: Test script include 4 | include: 5 | - script: vip 6 | parameters: 7 | - name: id 8 | value: vip1 9 | - name: ip 10 | value: 192.168.200.100 11 | actions: 12 | - include: vip 13 | - cib: | 14 | clone c-{{vip:id}} {{vip:id}} -------------------------------------------------------------------------------- /test/unittests/scripts/workflows/10-webserver.xml: -------------------------------------------------------------------------------- (workflow wizard definition, 51 lines; the XML markup was stripped when this document was extracted. The surviving text describes a "Web Server" workflow — "Configure a resource group containing a virtual IP address and an instance of the Apache web server. You may wish to use this in conjunction with a filesystem resource; in this case you will need to separately configure the filesystem then add colocation and ordering constraints to have it start before the resource group you create here." — with a required Group ID parameter ("The ID specified here is for the web server resource group.", "Unique ID for the web server resource group in the cluster.") and a crm script template that creates a group.) -------------------------------------------------------------------------------- /test/unittests/test.conf: -------------------------------------------------------------------------------- 1 | [path] 2 | sharedir = ../../doc 3 | cache = ../../doc 4 | crm_config = . 5 | crm_daemon_dir = . 6 | crm_daemon_user = hacluster 7 | ocf_root = . 8 | crm_dtd_dir = . 9 | pe_state_dir = . 10 | heartbeat_dir = .
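test.conf points every [path] setting at the checkout so the unit tests never touch a live Pacemaker installation. A quick, purely illustrative way to inspect such a file with nothing but the standard library (crmsh itself reads it through its own config module):

import configparser

cfg = configparser.ConfigParser()
cfg.read("test/unittests/test.conf")
for key, value in cfg.items("path"):
    print(key, "=", value)  # e.g. crm_daemon_user = hacluster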
11 | -------------------------------------------------------------------------------- /test/unittests/test_cib.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from __future__ import unicode_literals 3 | # Copyright (C) 2015 Kristoffer Gronlund 4 | # See COPYING for license information. 5 | from crmsh import cibconfig 6 | from lxml import etree 7 | import copy 8 | 9 | factory = cibconfig.cib_factory 10 | 11 | 12 | def setup_function(): 13 | "set up test fixtures" 14 | from crmsh import idmgmt 15 | idmgmt.clear() 16 | 17 | 18 | def teardown_function(): 19 | pass 20 | 21 | 22 | def test_cib_schema_change(): 23 | "Changing the validate-with CIB attribute" 24 | copy_of_cib = copy.copy(factory.cib_orig) 25 | print(etree.tostring(copy_of_cib, pretty_print=True)) 26 | tmp_cib_objects = factory.cib_objects 27 | factory.cib_objects = [] 28 | factory.change_schema("pacemaker-1.1") 29 | factory.cib_objects = tmp_cib_objects 30 | factory._copy_cib_attributes(copy_of_cib, factory.cib_orig) 31 | assert factory.cib_attrs["validate-with"] == "pacemaker-1.1" 32 | assert factory.cib_elem.get("validate-with") == "pacemaker-1.1" 33 | -------------------------------------------------------------------------------- /test/unittests/test_gv.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | # Copyright (C) 2015 Kristoffer Gronlund 3 | # See COPYING for license information. 4 | 5 | 6 | import re 7 | 8 | from crmsh import crm_gv 9 | from crmsh import cibconfig 10 | 11 | 12 | def test_digits_ident(): 13 | g = crm_gv.gv_types["dot"]() 14 | cibconfig.set_graph_attrs(g, ".") 15 | 16 | g.new_node("1a", top_node=True) 17 | g.new_attr("1a", 'label', "1a") 18 | g.new_node("a", top_node=True) 19 | g.new_attr("a", 'label', "a") 20 | 21 | expected = [ 22 | 'fontname="Helvetica";', 23 | 'fontsize="11";', 24 | 'compound="true";', 25 | '"1a" [label="1a"];', 26 | 'a [label="a"];', 27 | ] 28 | out = '\n'.join(g.repr()).replace('\t', '') 29 | 30 | for line in re.match( 31 | r'^digraph G {\n\n(?P.*)\n}$', out, re.M | re.S 32 | ).group('expected').split('\n'): 33 | assert line in expected 34 | expected.remove(line) 35 | 36 | assert len(expected) == 0 37 | -------------------------------------------------------------------------------- /test/unittests/test_objset.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | # Copyright (C) 2014 Kristoffer Gronlund 3 | # See COPYING for license information. 
4 | 5 | 6 | from crmsh import cibconfig 7 | 8 | factory = cibconfig.cib_factory 9 | 10 | 11 | def assert_in(needle, haystack): 12 | if needle not in haystack: 13 | message = "%s not in %s" % (needle, haystack) 14 | raise AssertionError(message) 15 | 16 | 17 | def setup_function(): 18 | "set up test fixtures" 19 | from crmsh import idmgmt 20 | idmgmt.clear() 21 | 22 | 23 | def teardown_function(): 24 | pass 25 | 26 | 27 | def test_nodes_nocli(): 28 | for n in factory.node_id_list(): 29 | obj = factory.find_object(n) 30 | if obj is not None: 31 | assert obj.node is not None 32 | assert True == obj.cli_use_validate() 33 | assert False == obj.nocli 34 | 35 | 36 | def test_show(): 37 | setobj = cibconfig.mkset_obj() 38 | s = setobj.repr_nopretty() 39 | sp = s.splitlines() 40 | assert_in("node ha-one", sp[0:3]) 41 | -------------------------------------------------------------------------------- /test/unittests/test_time.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | # Copyright (C) 2014 Kristoffer Gronlund 3 | # See COPYING for license information. 4 | 5 | 6 | from crmsh import utils 7 | from crmsh import logtime 8 | import time 9 | import datetime 10 | import dateutil.tz 11 | 12 | 13 | def test_time_convert1(): 14 | loctz = dateutil.tz.tzlocal() 15 | tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz)))) 16 | dt = utils.parse_time('Jun 01, 2015 10:00:00') 17 | assert logtime.human_date(dt) == time.strftime('%Y-%m-%d %H:%M:%S', tm) 18 | 19 | 20 | def test_time_convert2(): 21 | loctz = dateutil.tz.tzlocal() 22 | tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz)))) 23 | ts = time.localtime(utils.parse_to_timestamp('Jun 01, 2015 10:00:00')) 24 | assert time.strftime('%Y-%m-%d %H:%M:%S', ts) == time.strftime('%Y-%m-%d %H:%M:%S', tm) 25 | -------------------------------------------------------------------------------- /test/unittests/test_upgradeuitl.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import unittest 4 | from unittest import mock 5 | 6 | from crmsh import upgradeutil 7 | 8 | 9 | class TestUpgradeCondition(unittest.TestCase): 10 | @mock.patch('crmsh.upgradeutil._get_file_content') 11 | @mock.patch('os.stat') 12 | def test_is_upgrade_needed_by_force_upgrade(self, mock_stat: mock.MagicMock, mock_get_file_content): 13 | mock_stat.return_value = mock.Mock(os.stat_result) 14 | mock_get_file_content.return_value = b'' 15 | self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2'])) 16 | 17 | @mock.patch('crmsh.upgradeutil._get_file_content') 18 | @mock.patch('os.stat') 19 | def test_is_upgrade_needed_by_non_existent_seq( 20 | self, 21 | mock_stat: mock.MagicMock, 22 | mock_get_file_content: mock.MagicMock, 23 | ): 24 | mock_stat.side_effect = FileNotFoundError() 25 | mock_get_file_content.return_value = b'' 26 | self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2'])) 27 | 28 | @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ') 29 | @mock.patch('crmsh.upgradeutil._get_file_content') 30 | @mock.patch('os.stat') 31 | def test_is_upgrade_needed_by_seq_less_than_expected( 32 | self, 33 | mock_stat, 34 | mock_get_file_content, 35 | mock_current_upgrade_seq: mock.MagicMock, 36 | ): 37 | mock_stat.side_effect = FileNotFoundError() 38 | 
mock_get_file_content.return_value = b'0.1\n' 39 | mock_current_upgrade_seq.__gt__.return_value = True 40 | self.assertTrue(upgradeutil._is_upgrade_needed(['node-1', 'node-2'])) 41 | 42 | @mock.patch('crmsh.upgradeutil.CURRENT_UPGRADE_SEQ') 43 | @mock.patch('crmsh.upgradeutil._get_file_content') 44 | @mock.patch('os.stat') 45 | def test_is_upgrade_needed_by_seq_not_less_than_expected( 46 | self, 47 | mock_stat, 48 | mock_get_file_content, 49 | mock_current_upgrade_seq: mock.MagicMock, 50 | ): 51 | mock_stat.side_effect = FileNotFoundError() 52 | mock_get_file_content.return_value = b'1.0\n' 53 | mock_current_upgrade_seq.__gt__.return_value = False 54 | self.assertFalse(upgradeutil._is_upgrade_needed(['node-1', 'node-2'])) -------------------------------------------------------------------------------- /test/update-expected-output.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | crmtestout="$1" 3 | 4 | [ -d "$crmtestout" ] || { echo "usage: $0 <dir>"; exit 1; } 5 | 6 | for f in $crmtestout/*.diff; do 7 | fil=$(grep -- --- $f | awk '{print $2}' | sed 's/\/usr\/share\/crmsh\/tests/\/test/g') 8 | awk "NR==1{\$2=\"a$fil\"}1" < "$f" | awk "NR==2{\$2=\"b$fil\"}1" 9 | done -------------------------------------------------------------------------------- /test_container/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/opensuse/tumbleweed 2 | MAINTAINER Xin Liang 3 | 4 | CMD ["/usr/lib/systemd/systemd", "--system"] 5 | 6 | RUN zypper -n install systemd openssh \ 7 | firewalld iptables iptables-backend-nft \ 8 | make autoconf automake vim which libxslt-tools mailx iproute2 iputils bzip2 tar file glibc-locale-base dos2unix cpio gawk sudo \ 9 | python313 python313-pip python313-lxml python313-python-dateutil python313-build python313-PyYAML python313-curses python313-behave python313-coverage python313-packaging \ 10 | csync2 corosync corosync-qdevice pacemaker pacemaker-remote booth corosync-qnetd 11 | 12 | RUN ssh-keygen -t rsa -f /root/.ssh/id_rsa -N '' && \ 13 | cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \ 14 | chmod 0600 /root/.ssh/authorized_keys 15 | 16 | RUN mkdir -p /var/log/crmsh 17 | 18 | COPY behave_agent.py /opt 19 | COPY behave-agent.socket /etc/systemd/system 20 | COPY behave-agent@.service /etc/systemd/system 21 | RUN systemctl enable behave-agent.socket -------------------------------------------------------------------------------- /test_container/behave-agent.socket: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=behave test agent 3 | 4 | [Socket] 5 | ListenStream=1122 6 | Accept=yes 7 | 8 | [Install] 9 | WantedBy=sockets.target -------------------------------------------------------------------------------- /test_container/behave-agent@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=behave test agent 3 | CollectMode=inactive-or-failed 4 | 5 | [Service] 6 | ExecStart=/opt/behave_agent.py 7 | StandardInput=socket 8 | StandardOutput=socket 9 | StandardError=journal -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # content of: tox.ini , put in same dir as setup.py 2 | [tox] 3 | envlist = py311, py312, py313 4 | skip_missing_interpreters = true 5 | 6 | [base] 7 | changedir =
test/unittests 8 | deps = 9 | pytest 10 | pytest-cov 11 | commands = pytest -vv --cov=crmsh --cov-config .coveragerc --cov-report term --cov-report xml {posargs} 12 | 13 | [testenv] 14 | changedir = {[base]changedir} 15 | deps = {[base]deps} 16 | commands = {[base]commands} 17 | 18 | [testenv:3.11] 19 | changedir = {[base]changedir} 20 | deps = {[base]deps} 21 | commands = {[base]commands} 22 | 23 | [testenv:3.12] 24 | changedir = {[base]changedir} 25 | deps = {[base]deps} 26 | commands = {[base]commands} -------------------------------------------------------------------------------- /update-data-manifest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Copyright (C) 2015 Kristoffer Gronlund 3 | # 4 | # This program is free software; you can redistribute it and/or 5 | # modify it under the terms of the GNU General Public License 6 | # as published by the Free Software Foundation; either version 2 7 | # of the License, or (at your option) any later version. 8 | # 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU General Public License 15 | # along with this program; if not, write to the Free Software 16 | # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 | # 18 | # Generate the data-manifest file which lists 19 | # all files which should be installed to /usr/share/crmsh 20 | target=data-manifest 21 | [ -f $target ] && (printf "Removing $target..."; rm $target) 22 | printf "Generating $target..." 23 | cat <<EOF > $target 24 | version 25 | $(git ls-files scripts templates utils test) 26 | EOF 27 | [ !
-f $target ] && printf "FAILED\n" 28 | [ -f $target ] && printf "OK\n" 29 | -------------------------------------------------------------------------------- /utils/crm_clean.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import os 3 | import sys 4 | import shutil 5 | errors = [] 6 | mydir = os.path.dirname(os.path.abspath(sys.modules[__name__].__file__)) 7 | 8 | 9 | def bad(path): 10 | return ((not os.path.isabs(path)) or os.path.dirname(path) == '/' or 11 | path.startswith('/var') or path.startswith('/usr') or 12 | (not path.startswith(mydir))) 13 | 14 | for f in sys.argv[1:]: 15 | if bad(f): 16 | errors.append("cannot remove %s from %s" % (f, mydir)) 17 | continue 18 | try: 19 | if os.path.isfile(f): 20 | os.remove(f) 21 | elif os.path.isdir(f): 22 | if os.path.isfile(os.path.join(f, 'crm_script.debug')): 23 | print(open(os.path.join(f, 'crm_script.debug')).read()) 24 | 25 | # to check whether this clean request came from health 26 | # if it does, delete all except health-report 27 | del_flag = 0 28 | for x in os.listdir(f): 29 | if x.startswith("health-report"): 30 | del_flag = 1 31 | 32 | if del_flag == 1: 33 | for x in os.listdir(f): 34 | if x.startswith("health-report"): 35 | continue 36 | if os.path.isfile(os.path.join(f, x)):  # os.listdir() yields bare names; join with the parent dir 37 | os.remove(os.path.join(f, x)) 38 | elif os.path.isdir(os.path.join(f, x)): 39 | shutil.rmtree(os.path.join(f, x)) 40 | else: 41 | shutil.rmtree(f) 42 | except OSError as e: 43 | errors.append(e) 44 | if errors: 45 | print('\n'.join(errors), file=sys.stderr) 46 | sys.exit(1) -------------------------------------------------------------------------------- /version.in: -------------------------------------------------------------------------------- 1 | @PACKAGE_VERSION@ 2 | --------------------------------------------------------------------------------
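The bad() predicate in crm_clean.py above is the whole safety story of that helper: it rejects relative paths, direct children of /, anything under /var or /usr, and anything outside the script's own directory. A small self-contained illustration of the same logic (the mydir value here is hypothetical; in crm_clean.py it is derived from the module's own file path):

import os

mydir = "/tmp/crm-script-dir"  # hypothetical script location

def bad(path):
    return ((not os.path.isabs(path)) or os.path.dirname(path) == '/' or
            path.startswith('/var') or path.startswith('/usr') or
            (not path.startswith(mydir)))

print(bad("/tmp/crm-script-dir/workdir"))  # False: inside mydir, safe to remove
print(bad("/usr/lib/pacemaker"))           # True: system path, refused
print(bad("relative/path"))                # True: not absolute, refused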