├── .codespell.ignore.files ├── .codespell.ignore.words ├── .github └── workflows │ └── formulaCI.yml ├── .gitignore ├── .yamllint.yaml ├── LICENSE ├── Makefile ├── README.md ├── _service ├── ci ├── grains_hana01 ├── grains_hana02 └── minion ├── example ├── deploy.sh ├── pillar │ └── top.sls └── salt │ └── top.sls ├── form.yml ├── hana ├── defaults.yaml ├── enable_cost_optimized.sls ├── enable_primary.sls ├── enable_secondary.sls ├── extract_hana_package.sls ├── ha_cluster.sls ├── init.sls ├── install.sls ├── macros │ ├── get_hana_client_path.sls │ └── get_hana_exe_extract_dir.sls ├── map.jinja ├── monitoring.sls ├── packages.sls ├── pre_validation.sls ├── saptune.sls └── templates │ ├── cluster_resources.j2 │ ├── ha_cluster_sudoers.j2 │ ├── hanadb_exporter.j2 │ └── srCostOptMemConfig_hook.j2 ├── metadata.yml ├── pillar.example ├── saphanabootstrap-formula.changes └── saphanabootstrap-formula.spec /.codespell.ignore.files: -------------------------------------------------------------------------------- 1 | venv,.git 2 | -------------------------------------------------------------------------------- /.codespell.ignore.words: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /.github/workflows/formulaCI.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Formula CI 3 | # this workflow will 4 | # - run formula validation step - ci/validate-formula.sh 5 | # - deliver the package content to the configured repository 6 | # - submit the new package content to the upstream repository 7 | on: [push, pull_request] # yamllint disable-line rule:truthy 8 | env: 9 | PACKAGE_NAME: saphanabootstrap-formula 10 | jobs: 11 | tab: 12 | name: 'tabspace checking' 13 | runs-on: ubuntu-20.04 14 | 15 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 16 | defaults: 17 | run: 18 | shell: bash 19 | 20 | steps: 21 | # Checkout the repository to the GitHub Actions runner 22 | - name: Checkout 23 | uses: actions/checkout@v2 24 | 25 | - name: tab 26 | run: make test-tab 27 | codespell: 28 | name: 'spell checking' 29 | runs-on: ubuntu-20.04 30 | 31 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 32 | defaults: 33 | run: 34 | shell: bash 35 | 36 | steps: 37 | # Checkout the repository to the GitHub Actions runner 38 | - name: Checkout 39 | uses: actions/checkout@v2 40 | 41 | - name: Install linting tools 42 | run: | 43 | sudo apt-get install -y git python3 python3-pip 44 | python3 -m pip install codespell 45 | 46 | - name: codespell 47 | run: make test-codespell 48 | 49 | shellcheck: 50 | name: 'script syntax check' 51 | runs-on: ubuntu-20.04 52 | 53 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 54 | defaults: 55 | run: 56 | shell: bash 57 | 58 | steps: 59 | # Checkout the repository to the GitHub Actions runner 60 | - name: Checkout 61 | uses: actions/checkout@v2 62 | 63 | - name: Install linting tools 64 | run: | 65 | sudo apt-get install -y git python3 python3-pip shellcheck 66 | 67 | - name: shellcheck 68 | run: make test-shellcheck 69 | 70 | yamllint: 71 | name: 'yaml linting' 72 | runs-on: ubuntu-20.04 73 | 74 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 75 | defaults: 
76 | run: 77 | shell: bash 78 | 79 | steps: 80 | # Checkout the repository to the GitHub Actions runner 81 | - name: Checkout 82 | uses: actions/checkout@v2 83 | 84 | - name: Install linting tools 85 | run: | 86 | sudo apt-get install -y git python3 python3-pip 87 | python3 -m pip install codespell 88 | 89 | - name: yamllint 90 | run: make test-yamllint 91 | 92 | jsonlint: 93 | name: 'json linting' 94 | runs-on: ubuntu-20.04 95 | 96 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 97 | defaults: 98 | run: 99 | shell: bash 100 | 101 | steps: 102 | # Checkout the repository to the GitHub Actions runner 103 | - name: Checkout 104 | uses: actions/checkout@v2 105 | 106 | - name: Install linting tools 107 | run: | 108 | sudo apt-get install -y git python3 python3-pip 109 | python3 -m pip install jsonlint 110 | 111 | - name: jsonlint 112 | run: make test-jsonlint 113 | 114 | mlc: 115 | name: 'markup link checker' 116 | runs-on: ubuntu-20.04 117 | 118 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 119 | defaults: 120 | run: 121 | shell: bash 122 | 123 | steps: 124 | # Checkout the repository to the GitHub Actions runner 125 | - name: Checkout 126 | uses: actions/checkout@v2 127 | 128 | - name: Install linting tools 129 | run: | 130 | mkdir -p bin 131 | curl -L https://github.com/becheran/mlc/releases/download/v0.14.3/mlc-x86_64-linux -o bin/mlc 132 | chmod +x bin/mlc 133 | echo "$PWD/bin" >> $GITHUB_PATH 134 | 135 | - name: mlc 136 | run: make test-mlc 137 | 138 | salt: 139 | runs-on: ubuntu-20.04 140 | steps: 141 | - uses: actions/checkout@v2 142 | with: 143 | fetch-depth: 0 144 | - name: install salt 145 | run: | 146 | sudo curl -fsSL -o /usr/share/keyrings/salt-archive-keyring.gpg https://repo.saltproject.io/py3/ubuntu/20.04/amd64/latest/salt-archive-keyring.gpg 147 | echo "deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg arch=amd64] https://repo.saltproject.io/py3/ubuntu/20.04/amd64/latest focal main" | sudo tee /etc/apt/sources.list.d/salt.list 148 | sudo apt-get update 149 | sudo apt-get install salt-common 150 | - name: salt 151 | run: make test-salt 152 | 153 | delivery: 154 | needs: [tab, codespell, shellcheck, yamllint, jsonlint, mlc, salt] 155 | runs-on: ubuntu-20.04 156 | if: ${{ github.event_name != 'pull_request' }} 157 | container: 158 | image: shap/continuous_deliver 159 | env: 160 | OBS_USER: ${{ secrets.OBS_USER }} 161 | OBS_PASS: ${{ secrets.OBS_PASS }} 162 | OBS_PROJECT: ${{ secrets.OBS_PROJECT }} 163 | steps: 164 | - uses: actions/checkout@v2 165 | with: 166 | fetch-depth: 0 167 | - name: configure OSC 168 | # OSC credentials must be configured beforehand as the HOME variables cannot be changed from /github/home 169 | # that is used to run osc commands 170 | run: | 171 | /scripts/init_osc_creds.sh 172 | mkdir -p $HOME/.config/osc 173 | cp /root/.config/osc/oscrc $HOME/.config/osc 174 | - name: deliver package 175 | run: | 176 | sed -i 's~%%VERSION%%~${{ github.sha }}~' _service && \ 177 | sed -i 's~%%REPOSITORY%%~${{ github.repository }}~' _service && \ 178 | /scripts/upload.sh 179 | 180 | submit: 181 | needs: [tab, codespell, shellcheck, yamllint, jsonlint, mlc, salt, delivery] 182 | runs-on: ubuntu-20.04 183 | if: ${{ github.event_name != 'pull_request' && github.ref == 'refs/heads/main' }} 184 | container: 185 | image: shap/continuous_deliver 186 | env: 187 | OBS_USER: ${{ secrets.OBS_USER }} 188 | OBS_PASS: ${{ 
secrets.OBS_PASS }} 189 | OBS_PROJECT: ${{ secrets.OBS_PROJECT}} 190 | TARGET_PROJECT: ${{ secrets.TARGET_PROJECT}} 191 | steps: 192 | - uses: actions/checkout@v2 193 | with: 194 | fetch-depth: 0 195 | - name: configure OSC 196 | run: | 197 | /scripts/init_osc_creds.sh 198 | mkdir -p $HOME/.config/osc 199 | cp /root/.config/osc/oscrc $HOME/.config/osc 200 | - name: submit package 201 | run: | 202 | sed -i 's~%%VERSION%%~${{ github.sha }}~' _service && \ 203 | sed -i 's~%%REPOSITORY%%~${{ github.repository }}~' _service && \ 204 | /scripts/submit.sh 205 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /example/pillar/hana.sls 2 | /grains 3 | /minion 4 | /top.sls 5 | /var/ 6 | venv 7 | .envrc 8 | .direnv 9 | shell.nix 10 | .ropeproject 11 | -------------------------------------------------------------------------------- /.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | ignore: | 5 | venv 6 | 7 | rules: 8 | # 80 chars should be enough, but don't fail if a line is longer 9 | line-length: 10 | max: 220 11 | level: warning 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | =========================================================================== 204 | 205 | Below is a summary of the licensing used by external modules that are 206 | bundled with SaltStack. 
207 | 208 | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 209 | Upstream-Name: salt 210 | Upstream-Contact: salt-users@googlegroups.com 211 | Source: https://github.com/saltstack/salt 212 | 213 | Files: * 214 | Copyright: 2014 SaltStack Team 215 | License: Apache-2.0 216 | Licensed under the Apache License, Version 2.0 (the "License"); 217 | you may not use this file except in compliance with the License. 218 | You may obtain a copy of the License at 219 | . 220 | http://www.apache.org/licenses/LICENSE-2.0 221 | . 222 | Unless required by applicable law or agreed to in writing, software 223 | distributed under the License is distributed on an "AS IS" BASIS, 224 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 225 | See the License for the specific language governing permissions and 226 | limitations under the License. 227 | . 228 | On Debian systems, the full text of the Apache License, Version 2.0 can be 229 | found in the file 230 | `/usr/share/common-licenses/Apache-2.0'. 231 | 232 | Files: debian/* 233 | Copyright: 2013 Joe Healy 234 | 2012 Michael Prokop 235 | 2012 Christian Hofstaedtler 236 | 2012 Ulrich Dangel 237 | 2012 Corey Quinn 238 | 2011 Aaron Toponce 239 | License: Apache-2.0 240 | Licensed under the Apache License, Version 2.0 (the "License"); 241 | you may not use this file except in compliance with the License. 242 | You may obtain a copy of the License at 243 | . 244 | http://www.apache.org/licenses/LICENSE-2.0 245 | . 246 | Unless required by applicable law or agreed to in writing, software 247 | distributed under the License is distributed on an "AS IS" BASIS, 248 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 249 | See the License for the specific language governing permissions and 250 | limitations under the License. 251 | . 252 | On Debian systems, the full text of the Apache License, Version 2.0 can be 253 | found in the file 254 | `/usr/share/common-licenses/Apache-2.0'. 255 | 256 | Files: salt/auth/pam.py 257 | Copyright: 2007 Chris AtLee 258 | License: MIT License 259 | Permission is hereby granted, free of charge, to any person obtaining a copy 260 | of this software and associated documentation files (the "Software"), to deal 261 | in the Software without restriction, including without limitation the rights 262 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 263 | copies of the Software, and to permit persons to whom the Software is 264 | furnished to do so, subject to the following conditions: 265 | . 266 | The above copyright notice and this permission notice shall be included in 267 | all copies or substantial portions of the Software. 268 | . 269 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 270 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 271 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 272 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 273 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 274 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 275 | THE SOFTWARE. 276 | 277 | Files: doc/_ext/youtube.py 278 | Copyright: 2009 Chris Pickel 279 | License: BSD-2-clause 280 | Redistribution and use in source and binary forms, with or without 281 | modification, are permitted provided that the following conditions are 282 | met: 283 | . 
284 | * Redistributions of source code must retain the above copyright 285 | notice, this list of conditions and the following disclaimer. 286 | . 287 | * Redistributions in binary form must reproduce the above copyright 288 | notice, this list of conditions and the following disclaimer in the 289 | documentation and/or other materials provided with the distribution. 290 | . 291 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 292 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 293 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 294 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 295 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 296 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 297 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 298 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 299 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 300 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 301 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 302 | 303 | Files: salt/ext/six.py 304 | Copyright: 2010-2014 Benjamin Peterson 305 | License: MIT License 306 | Permission is hereby granted, free of charge, to any person obtaining a copy 307 | of this software and associated documentation files (the "Software"), to deal 308 | in the Software without restriction, including without limitation the rights 309 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 310 | copies of the Software, and to permit persons to whom the Software is 311 | furnished to do so, subject to the following conditions: 312 | . 313 | The above copyright notice and this permission notice shall be included in 314 | all copies or substantial portions of the Software. 315 | . 316 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 317 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 318 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 319 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 320 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 321 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 322 | THE SOFTWARE. 323 | 324 | Files: doc/_ext/images 325 | Copyright: 2013 SaltStack Team 326 | License: Apache-2.0 327 | Licensed under the Apache License, Version 2.0 (the "License"); 328 | you may not use this file except in compliance with the License. 329 | You may obtain a copy of the License at 330 | . 331 | http://www.apache.org/licenses/LICENSE-2.0 332 | . 333 | Unless required by applicable law or agreed to in writing, software 334 | distributed under the License is distributed on an "AS IS" BASIS, 335 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 336 | See the License for the specific language governing permissions and 337 | limitations under the License. 338 | . 339 | On Debian systems, the full text of the Apache License, Version 2.0 can be 340 | found in the file 341 | `/usr/share/common-licenses/Apache-2.0'. 342 | . 343 | Files in this directory were created in-house. 
344 | 345 | Files: tests/utils/cptestcase.py 346 | Copyright: (c) 2014 Adam Hajari 347 | The MIT License (MIT) 348 | 349 | Permission is hereby granted, free of charge, to any person obtaining a copy 350 | of this software and associated documentation files (the "Software"), to deal 351 | in the Software without restriction, including without limitation the rights 352 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 353 | copies of the Software, and to permit persons to whom the Software is 354 | furnished to do so, subject to the following conditions: 355 | 356 | The above copyright notice and this permission notice shall be included in all 357 | copies or substantial portions of the Software. 358 | 359 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 360 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 361 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 362 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 363 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 364 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 365 | SOFTWARE. 366 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # kudos: 2 | # - https://medium.com/@exustash/three-good-practices-for-better-ci-cd-makefiles-5b93452e4cc3 3 | # - https://le-gall.bzh/post/makefile-based-ci-chain-for-go/ 4 | # - https://makefiletutorial.com/ 5 | # - https://www.cl.cam.ac.uk/teaching/0910/UnixTools/make.pdf 6 | # 7 | #SHELL := /usr/bin/bash # set default shell 8 | #.SHELLFLAGS = -c # Run commands in a -c flag 9 | 10 | .NOTPARALLEL: ; # wait for this target to finish 11 | .EXPORT_ALL_VARIABLES: ; # send all vars to shell 12 | 13 | .PHONY: all # All targets are accessible for user 14 | .DEFAULT: help # Running Make will run the help target 15 | 16 | BRANCH := $(shell git rev-parse --abbrev-ref HEAD) 17 | ifeq ($(BRANCH), HEAD) 18 | BRANCH := ${CI_BUILD_REF_NAME} 19 | endif 20 | 21 | # help: @ List available tasks of the project 22 | help: 23 | @grep -E '[a-zA-Z\.\-]+:.*?@ .*$$' $(MAKEFILE_LIST)| tr -d '#' | awk 'BEGIN {FS = ":.*?@ "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 24 | 25 | ## test section 26 | # All tests are called on "." if possible. 27 | # If this is not possible a special loop is used 28 | # to sum up all error codes. 29 | 30 | # test: @ Run all defined tests 31 | test: test-tab test-codespell test-shellcheck test-yamllint test-jsonlint test-salt 32 | @echo "All tests Done!" 33 | 34 | # test-tab: @ Run linting to find files containing tabspaces 35 | test-tab: 36 | @for file in $(shell find . -regextype egrep -regex '.*\.(sls|yml|yaml)' ! -path "**/venv/*"); do\ 37 | grep -q -P '\t' $${file} ;\ 38 | if [ "$$?" -eq 0 ]; then\ 39 | err_add=1 ;\ 40 | echo "Tab found in $${file}" ;\ 41 | grep -H -n -P '\t' $${file} ;\ 42 | else \ 43 | err_add=0 ;\ 44 | fi;\ 45 | err=$$((err_add + err)) ;\ 46 | done; exit $$err 47 | 48 | # test-codespell: @ Run spell check 49 | test-codespell: 50 | codespell -H -f -s -I .codespell.ignore.words -S $(shell cat .codespell.ignore.files) -C 4 -q 6 51 | 52 | # test-shellcheck: @ Run linting on all shell scripts 53 | test-shellcheck: 54 | for file in $(shell find . -name '*.sh' ! -path "**/venv/*"); do\ 55 | echo $${file} ;\ 56 | shellcheck -s bash -x $${file};\ 57 | err=$$(($$? 
+ err)) ;\ 58 | done; exit $$err 59 | 60 | # test-yamllint: @ Run linting on all yaml files 61 | test-yamllint: 62 | # yamllint -c .yamllint.yaml -s . 63 | yamllint -c .yamllint.yaml . 64 | 65 | # test-jsonlint: @ Run linting on all json files 66 | test-jsonlint: 67 | for file in $(shell find . -name '*.json' ! -path "**/venv/*"); do\ 68 | echo $${file} ;\ 69 | jq . < $${file} >/dev/null;\ 70 | err=$$(($$? + err)) ;\ 71 | done; exit $$err 72 | 73 | # test-mlc: @ Run markup link checker 74 | test-mlc: 75 | mkdir -p aws/.terraform # make sure ignore-path exists 76 | mlc --throttle 1000 77 | 78 | # test-salt: @ Run Salt Unit Tests 79 | test-salt: 80 | cp pillar.example example/pillar/hana.sls 81 | cp example/salt/top.sls . 82 | echo "===========================================" 83 | echo " Using primary host " 84 | echo "===========================================" 85 | cp ci/grains_hana01 grains 86 | cp ci/minion minion 87 | salt-call state.show_highstate --local --file-root=./ --config-dir=. --pillar-root=example/pillar --retcode-passthrough -l debug 88 | echo 89 | echo "===========================================" 90 | echo " Using secondary host " 91 | echo "===========================================" 92 | cp ci/grains_hana02 grains 93 | cp ci/minion minion 94 | salt-call state.show_highstate --local --file-root=./ --config-dir=. --pillar-root=example/pillar --retcode-passthrough -l debug 95 | 96 | 97 | # all: @ Runs everything 98 | all: test 99 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Formula CI](https://github.com/SUSE/saphanabootstrap-formula/actions/workflows/formulaCI.yml/badge.svg)](https://github.com/SUSE/saphanabootstrap-formula/actions/workflows/formulaCI.yml) 2 | 3 | # SAP HANA platform bootstrap Salt formula 4 | 5 | Salt formula to bootstrap and manage a multi-node SAP HANA platform environment. 6 | 7 | ## Features 8 | 9 | The formula provides the capability to create a multi-node SAP HANA environment. Here are some of the features: 10 | 11 | - Install one or multiple SAP HANA instances (on one or multiple nodes) 12 | - Set up a system replication configuration between two SAP HANA nodes 13 | - Extract the required files from the provided `.tar`, `.sar`, `.exe` files 14 | - Apply saptune to the nodes with the needed SAP notes 15 | - Enable all of the prerequisites to set up an HA cluster on top of the SAP HANA system replication 16 | - Install and configure the [hanadb_exporter](https://github.com/SUSE/hanadb_exporter) 17 | 18 | ## Installation 19 | 20 | The project can be installed in many ways, including but not limited to: 21 | 22 | 1. [RPM](#rpm) 23 | 2. [Manual installation](#manual-installation) 24 | 25 | ### RPM 26 | 27 | On openSUSE or SUSE Linux Enterprise, use the `zypper` package manager: 28 | 29 | ```shell 30 | zypper install saphanabootstrap-formula 31 | ``` 32 | 33 | **Important!** This will install the formula in `/usr/share/salt-formulas/states/hana`. If the formula is used in masterless mode, make sure that the `/usr/share/salt-formulas/states` entry is correctly configured in the `file_roots` entry of the Salt minion configuration.
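For masterless mode, a minimal sketch of such a `file_roots` configuration (the drop-in file name below is only an example; adjust the paths to your setup) could look like this:

```yaml
# /etc/salt/minion.d/file_roots.conf (example drop-in file)
file_roots:
  base:
    - /srv/salt
    - /usr/share/salt-formulas/states
```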
34 | 35 | Depending on the patch level of the target system and the release cycle of this project, the package in the regular repository might not be the latest one. If you want the latest features, have a look at the test development repositories on SUSE's Open Build Service: [network:ha-clustering:sap-deployments:devel/saphanabootstrap-formula](https://build.opensuse.org/package/show/network:ha-clustering:sap-deployments:devel/saphanabootstrap-formula). 36 | 37 | ### Manual Installation 38 | 39 | A manual installation can be done by cloning this repository: 40 | 41 | ``` 42 | git clone https://github.com/SUSE/saphanabootstrap-formula 43 | ``` 44 | 45 | **Important!** This will not install the formula anywhere where Salt can find it. If the formula is used in masterless mode, also make sure to copy the complete `hana` subdirectory to the location defined in the `file_roots` entry of your Salt minion configuration. 46 | 47 | For example: 48 | 49 | ``` 50 | cd saphanabootstrap-formula 51 | cp -R hana /srv/salt 52 | ``` 53 | 54 | **Important!** The formula depends on the [salt-shaptools](https://github.com/SUSE/salt-shaptools) package. Make sure it is installed properly if you follow the manual installation (the package can be installed as an RPM package too). 55 | 56 | ## Usage 57 | 58 | Follow these steps to configure the formula execution. After this, the formula can be executed using the `master/minion` or `masterless` options: 59 | 60 | 1. Modify the `top.sls` file (by default stored in `/srv/salt`) to include the `hana` entry. 61 | 62 | Here is an example to execute the HANA formula on all of the nodes: 63 | 64 | ``` 65 | # This file is /srv/salt/top.sls 66 | base: 67 | '*': 68 | - hana 69 | ``` 70 | 71 | 2. Customize the execution pillar file. Here is an example of a pillar file for this formula with all of the options: [pillar.example](https://github.com/SUSE/saphanabootstrap-formula/blob/master/pillar.example) 72 | The `pillar.example` can be found either as a link to the file in the master branch or as a file in the file system at `/usr/share/salt-formulas/metadata/hana/pillar.example`. 73 | 74 | 3. Set the execution pillar file. For that, modify the `top.sls` of the pillars (by default stored in `/srv/pillar`) to include the `hana` entry, and copy your specific `hana.sls` pillar file into the same folder. 75 | 76 | Here is an example that applies the recently created `hana.sls` pillar file to all of the nodes: 77 | 78 | ``` 79 | # This file is /srv/pillar/top.sls 80 | base: 81 | '*': 82 | - hana 83 | ``` 84 | 85 | 4. Execute the formula. 86 | 87 | 1. Master/Minion execution. 88 | 89 | `salt '*' state.highstate` 90 | 91 | 2. Masterless execution. 92 | 93 | `salt-call --local state.highstate` 94 | 95 | **Important!** The hostnames and minion names of the HANA nodes must match the output of the `hostname` command. 96 | 97 | ### Salt pillar encryption 98 | 99 | Pillars are expected to contain private data such as user passwords required for the automated installation or other operations. Therefore, such pillar data needs to be stored in an encrypted state, which can be decrypted during pillar compilation. 100 | 101 | The SaltStack GPG renderer provides secure encryption/decryption of pillar data. The configuration of GPG keys and the procedure for pillar encryption are described in the SaltStack documentation: 102 | 103 | - [SaltStack pillar encryption](https://docs.saltstack.com/en/latest/topics/pillar/#pillar-encryption) 104 | 105 | - [SALT GPG RENDERERS](https://docs.saltstack.com/en/latest/ref/renderers/all/salt.renderers.gpg.html) 106 | 107 | **Note:** 108 | 109 | - Only passwordless GPG keys are supported; already existing keys cannot be used.
110 | 111 | - If a masterless approach is used (as in the current automated deployment) the gpg private key must be imported in all the nodes. This might require the copy/paste of the keys. 112 | 113 | ## OBS Packaging 114 | 115 | The CI automatically publishes new releases to SUSE's Open Build Service every time a pull request is merged into `master` branch. For that, update the new package version in [\_service](https://github.com/SUSE/saphanabootstrap-formula/blob/master/_service) and 116 | add the new changes in [saphanabootstrap-formula.changes](https://github.com/SUSE/saphanabootstrap-formula/blob/master/saphanabootstrap-formula.changes). 117 | 118 | The new version is published at: 119 | 120 | - https://build.opensuse.org/package/show/network:ha-clustering:sap-deployments:devel/saphanabootstrap-formula 121 | - https://build.opensuse.org/package/show/openSUSE:Factory/saphanabootstrap-formula (only if the spec file version is increased) 122 | -------------------------------------------------------------------------------- /_service: -------------------------------------------------------------------------------- 1 | 2 | 3 | https://github.com/%%REPOSITORY%%.git 4 | git 5 | .git 6 | saphanabootstrap-formula 7 | 0.14.0+git.%ct.%h 8 | %%VERSION%% 9 | 10 | 11 | 12 | *.tar 13 | gz 14 | 15 | 16 | 17 | saphanabootstrap-formula 18 | 19 | 20 | -------------------------------------------------------------------------------- /ci/grains_hana01: -------------------------------------------------------------------------------- 1 | host: hana01 2 | -------------------------------------------------------------------------------- /ci/grains_hana02: -------------------------------------------------------------------------------- 1 | host: hana02 2 | hana_inst_folder: myfold 3 | -------------------------------------------------------------------------------- /ci/minion: -------------------------------------------------------------------------------- 1 | root_dir: ./ 2 | id: ci 3 | -------------------------------------------------------------------------------- /example/deploy.sh: -------------------------------------------------------------------------------- 1 | cp -R pillar/* /srv/pillar 2 | cp ../pillar.example /srv/pillar/hana.sls 3 | mkdir -p /srv/salt/hana 4 | cp -R salt/* /srv/salt 5 | cp -R ../hana /srv/salt 6 | -------------------------------------------------------------------------------- /example/pillar/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | - hana 4 | -------------------------------------------------------------------------------- /example/salt/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | - hana 4 | -------------------------------------------------------------------------------- /form.yml: -------------------------------------------------------------------------------- 1 | --- 2 | hana: 3 | $name: HANA 4 | $type: group 5 | install_packages: 6 | $name: Install required packages 7 | $type: boolean 8 | $default: true 9 | $help: Install all required packages from currently existing repositories 10 | saptune_solution: 11 | $name: saptune solution to apply 12 | $type: text 13 | $default: HANA 14 | $help: saptune solution to apply to all nodes 15 | $optional: true 16 | software_path: 17 | $name: Path to HANA platform installation media folder 18 | $type: text 19 | $help: The path to already extracted HANA platform installation media folder which can be local 
or already mounted shared location (NFS, SMB, etc). This will have preference over hana installation media archive 20 | $optional: true 21 | use_hana_archive_file: 22 | $name: Use archive file for HANA platform installation 23 | $type: boolean 24 | $default: false 25 | $help: Mark this option if you want to use a hana archive file for the HANA installation 26 | hana_archive_file: 27 | $name: Path to HANA platform installation media archive 28 | $visibleIf: .use_hana_archive_file == true 29 | $type: text 30 | $help: The path to installation media archive in any of the RAR, ZIP, EXE or SAR format. For SAR archive, please also provide the sapcar executable path for extraction 31 | $optional: true 32 | hana_extract_dir: 33 | $name: Path to extract the HANA installation media archive 34 | $visibleIf: .use_hana_archive_file == true 35 | $type: text 36 | $default: /sapmedia_extract/HANA 37 | $help: The HANA archive will be extracted to this path. By default this path is /sapmedia_extract/HANA 38 | $optional: true 39 | sapcar_exe_file: 40 | $name: Path to sapcar executable if extracting HANA SAR archive 41 | $visibleIf: .use_hana_archive_file == true 42 | $type: text 43 | $help: The path to sapcar executable to extract HANA SAR archive 44 | $optional: true 45 | ha_enabled: 46 | $name: Enable HA cluster configuration 47 | $type: boolean 48 | $default: true 49 | $help: Enable the HA cluster configuration which will install the SAPHanaSR hook. To use this option the primary and secondary nodes must be defined in the pillar file 50 | scale_out: 51 | $name: Enable HANA scale-out deployment 52 | $type: boolean 53 | $default: false 54 | $help: Enable HANA scale-out deployment. To use this option the HANA roles must be defined in the pillar file. 55 | monitoring_enabled: 56 | $name: Enable the host to be monitored by exporters 57 | $type: boolean 58 | $default: false 59 | $help: Enable the node monitoring via exporters which will be installed and configured in all the nodes. Customize the exporter configuration in each node's dedicated sections. 60 | ha_dr_sustkover_enabled: 61 | $name: Enable HANA HA/DR provdider hook susTkOver 62 | $type: boolean 63 | $default: false 64 | $help: See https://documentation.suse.com/sbp/all/single-html/SLES4SAP-hana-sr-guide-PerfOpt-15/#cha.s4s.hana-hook for details. 65 | ha_dr_suschksrv_enabled: 66 | $name: Enable HANA HA/DR provdider hook susChkSrv 67 | $type: boolean 68 | $default: false 69 | $help: See https://documentation.suse.com/sbp/all/single-html/SLES4SAP-hana-sr-guide-PerfOpt-15/#cha.s4s.hana-hook for details. 70 | ha_dr_suschksrv_action_on_lost: 71 | $name: Configure "Action on lost" for HANA HA/DR provider hook susChkSrv 72 | $type: select 73 | $values: [stop, fence] 74 | $help: See `man 7 susChkSrv.py` and https://documentation.suse.com/sbp/all/single-html/SLES4SAP-hana-sr-guide-PerfOpt-15/#cha.s4s.hana-hook for details. 75 | nodes: 76 | $name: Nodes 77 | $type: edit-group 78 | $minItems: 1 79 | $itemName: "" 80 | $prototype: 81 | host: 82 | $name: Hostname to install HANA 83 | $type: text 84 | $optional: false 85 | sid: 86 | $name: HANA system identifier (SID) 87 | $type: text 88 | $optional: false 89 | $help: System ID of the HANA installation 90 | instance: 91 | $name: HANA instance number 92 | $type: text 93 | $optional: false 94 | password: 95 | $name: SAP user password 96 | $type: password 97 | $optional: false 98 | $help: This is the OS adm user password. Will be used to check if there is an already installed HANA. 
99 | scenario_type: 100 | $name: HANA scenario type 101 | $type: select 102 | $values: [performance-optimized, cost-optimized] 103 | cost_optimized_parameters: 104 | $name: HANA parameters for cost-optimized 105 | $visibleIf: .scenario_type == cost-optimized 106 | $optional: true 107 | $type: group 108 | global_allocation_limit: 109 | $name: HANA memory allocation limit 110 | $type: text 111 | $optional: false 112 | $help: This is the memory size limit to be set for HANA in Mb 113 | preload_column_tables: 114 | $name: HANA preload column tables 115 | $type: boolean 116 | $default: false 117 | $optional: false 118 | $help: Choose this option to set the HANA behavior of preloading column tables on startup 119 | install_checkbox: 120 | $name: Install HANA 121 | $type: boolean 122 | $default: true 123 | install: 124 | $name: Install new HANA instance 125 | $visibleIf: .install_checkbox == true 126 | $optional: true 127 | $type: group 128 | local_software_path_checkbox: 129 | $name: Use local HANA installation media 130 | $type: boolean 131 | $default: false 132 | $help: Specify the installation media on this node, otherwise global software path will be used. 133 | software_path: 134 | $name: Path to local HANA installation media 135 | $visibleIf: .local_software_path_checkbox == true 136 | $type: text 137 | $optional: false 138 | $help: The path to already extracted HANA platform installation media folder which can be local or already mounted shared location (NFS, SMB, etc). This will have preference over global software path. 139 | root_user: 140 | $name: Machine root user 141 | $type: text 142 | $optional: false 143 | root_password: 144 | $name: Machine root password 145 | $type: password 146 | $optional: false 147 | use_config_file: 148 | $name: Use configuration file 149 | $type: boolean 150 | $default: false 151 | $help: Mark this option if you want to use a custom config file for the HANA installation options 152 | config_file: 153 | $name: Configuration file 154 | $visibleIf: .use_config_file == true 155 | $type: text 156 | $help: Path to the config file location. The template can be generated with the hdblcm --dump_configfile_template option 157 | use_hdb_pwd_file: 158 | $name: Fetch HANA passwords from XML file 159 | $type: boolean 160 | $default: false 161 | $help: Mark this option if you want to fetch HANA passwords from XML file for the HANA installation options 162 | hdb_pwd_file: 163 | $name: Path to XML file with HANA Passwords 164 | $visibleIf: .use_hdb_pwd_file == true 165 | $type: text 166 | $help: Path to the XML file location. 
The password template can be generated with the hdblcm --dump_configfile_template option 167 | sapadm_password: 168 | $name: SAP admin password (adm) 169 | $visibleIf: .use_config_file == false 170 | $type: password 171 | $help: The password of the SAP administrator user 172 | system_user_password: 173 | $name: SAP SYSTEM user password 174 | $visibleIf: .use_config_file == false 175 | $type: password 176 | $help: The password of the database SYSTEM (superuser) user 177 | extra_parameters: 178 | $name: Installation extra options 179 | $help: Optional configuration parameters (exact name as in the config file) 180 | $optional: true 181 | $type: edit-group 182 | $itemName: "" 183 | $prototype: 184 | $name: Extra parameter 185 | key: 186 | $name: Value to update 187 | value: 188 | $name: New value 189 | system_replication: 190 | $type: group 191 | system_replication_options: 192 | $name: System replication options 193 | $type: select 194 | $values: [None, Primary, Secondary] 195 | primary: 196 | $visibleIf: .system_replication#system_replication_options == Primary 197 | $optional: true 198 | $type: group 199 | name: 200 | $name: Primary Site Name 201 | $type: text 202 | $optional: false 203 | create_backup: 204 | $name: Create new database backup 205 | $type: boolean 206 | $default: false 207 | $help: Mark if you want the execute the database backup. Mandatory before enabling the System Replication 208 | backup: 209 | $name: Backup 210 | $optional: true 211 | $visibleIf: .create_backup == true 212 | $type: group 213 | key_name: 214 | $name: Key Name (hdbuserstore) 215 | $type: text 216 | $optional: true 217 | user_name: 218 | $name: SAP user 219 | $type: text 220 | $optional: true 221 | user_password: 222 | $name: SAP user password 223 | $type: password 224 | $optional: true 225 | database: 226 | $name: Database name to backup 227 | $type: text 228 | $optional: false 229 | $placeholder: SYSTEMDB 230 | file: 231 | $name: Backup file name 232 | $type: text 233 | $optional: false 234 | $help: File name for the backup that will be created 235 | create_userkey: 236 | $name: Create new key (hdbuserstore) 237 | $type: boolean 238 | $default: false 239 | $help: Mark this option to create a new key into the hdbuserstore 240 | userkey: 241 | $name: User key (hdbuserstore) 242 | $optional: true 243 | $visibleIf: .create_userkey == true 244 | $type: group 245 | key_name: 246 | $name: New key name 247 | $type: text 248 | $optional: false 249 | environment: 250 | $name: Environment 251 | $type: text 252 | $optional: false 253 | $help: Use the format : 254 | user_name: 255 | $name: SAP user 256 | $type: text 257 | $default: SYSTEM 258 | $optional: false 259 | user_password: 260 | $name: SAP user password 261 | $type: password 262 | $optional: false 263 | database: 264 | $name: Database name 265 | $type: text 266 | $default: SYSTEMDB 267 | $optional: false 268 | secondary: 269 | $visibleIf: .system_replication#system_replication_options == Secondary 270 | $optional: true 271 | $name: Secondary node 272 | $type: group 273 | name: 274 | $name: Secondary site name 275 | $type: text 276 | $optional: false 277 | remote_host: 278 | $name: Primary node hostname 279 | $type: text 280 | $optional: false 281 | $help: Hostname of the primary HANA instance to connect the system replication 282 | remote_instance: 283 | $name: Primary node instance number 284 | $type: text 285 | $optional: false 286 | $help: Instance number of the primary HANA instance to connect the system replication 287 | replication_mode: 288 | $name: 
Replication mode 289 | $type: select 290 | $values: [sync, syncmem, async] 291 | operation_mode: 292 | $name: Operation mode 293 | $type: select 294 | $values: [logreplay, delta_datashipping] 295 | primary_timeout: 296 | $name: Timeout to wait until the primary node is enabled 297 | $type: text 298 | $optional: true 299 | interval: 300 | $name: Interval used to check if the primary instance is ready 301 | $type: text 302 | $optional: true 303 | 304 | add_exporter: 305 | $name: Add SAP HANA database metrics exporter 306 | $type: boolean 307 | $default: false 308 | $help: Mark if you want add the SAP HANA database metrics exporter 309 | exporter: 310 | $name: SAP HANA database metrics exporter 311 | $optional: true 312 | $visibleIf: .add_exporter == true 313 | $type: group 314 | exposition_port: 315 | $name: SAP HANA exporter exposition port 316 | $type: text 317 | $optional: false 318 | multi_tenant: 319 | $name: Enable Multi tenant monitoring 320 | $type: boolean 321 | $default: true 322 | $help: If enabled, the exporter will select all the tenants listed on the SYSTEMDB metadata and open a connection to each of them in order to collect metrics 323 | user: 324 | $name: SAP HANA user 325 | $type: text 326 | $default: SYSTEM 327 | $optional: false 328 | password: 329 | $name: SAP HANA password 330 | $type: password 331 | $optional: false 332 | port: 333 | $name: HANA database port 334 | $type: text 335 | $default: 30013 336 | $optional: false 337 | $help: If multi tenant is enabled, this port must point to a SYSTEMDB database port (3XX13 be default) 338 | timeout: 339 | $name: Connection timeout 340 | $type: text 341 | $default: 30 342 | $optional: true 343 | $help: Timeout in seconds for HANA database connection 344 | -------------------------------------------------------------------------------- /hana/defaults.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | hana: 3 | install_packages: true 4 | ha_enabled: true 5 | scale_out: false 6 | hana_extract_dir: /sapmedia_extract/HANA 7 | hana_client_extract_dir: /sapmedia_extract/HANA_CLIENT 8 | nodes: [] 9 | monitoring_enabled: false 10 | -------------------------------------------------------------------------------- /hana/enable_cost_optimized.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {%- from 'hana/macros/get_hana_client_path.sls' import get_hana_client_path with context %} 3 | 4 | {% set host = grains['host'] %} 5 | 6 | {% for node in hana.nodes %} 7 | {%- set hana_client_path = get_hana_client_path(hana, node) %} 8 | {% if node.host == host and node.scenario_type is defined and node.scenario_type.lower() == 'cost-optimized' and node.cost_optimized_parameters is defined%} 9 | 10 | reduce_memory_resources_{{ node.host+node.sid }}: 11 | hana.memory_resources_updated: 12 | - name: {{ node.host }} 13 | - global_allocation_limit: {{ node.cost_optimized_parameters.global_allocation_limit }} 14 | - preload_column_tables: {{ node.cost_optimized_parameters.preload_column_tables }} 15 | - user_name: SYSTEM 16 | {% if node.install.system_user_password is defined %} 17 | - user_password: {{ node.install.system_user_password }} 18 | {% endif %} 19 | - sid: {{ node.sid }} 20 | - inst: {{ node.instance }} 21 | - password: {{ node.password }} 22 | - require: 23 | - hana_install_{{ node.host+node.sid }} 24 | 25 | {% if node.host == host and node.secondary is defined %} 26 | 27 | setup_srHook_directory: 
28 | file.directory: 29 | - name: /hana/shared/srHook 30 | - user: {{ node.sid.lower() }}adm 31 | - group: sapsys 32 | - mode: 755 33 | - makedirs: True 34 | - require: 35 | - reduce_memory_resources_{{ node.host+node.sid }} 36 | 37 | install_srCostOptMemConfig_hook: 38 | file.managed: 39 | - source: salt://hana/templates/srCostOptMemConfig_hook.j2 40 | - name: /hana/shared/srHook/srCostOptMemConfig.py 41 | - user: {{ node.sid.lower() }}adm 42 | - group: sapsys 43 | - mode: 755 44 | - template: jinja 45 | - require: 46 | - setup_srHook_directory 47 | 48 | {% set platform = grains['cpuarch'].upper() %} 49 | {% if platform not in ['X86_64', 'PPC64LE'] %} 50 | failure: 51 | test.fail_with_changes: 52 | - name: 'not supported platform. only x86_64 and ppc64le are supported' 53 | - failhard: True 54 | {% endif %} 55 | 56 | configure_ha_dr_provider_srCostOptMemConfig: 57 | module.run: 58 | - hana.set_ini_parameter: 59 | - ini_parameter_values: 60 | - section_name: 'ha_dr_provider_srCostOptMemConfig' 61 | parameter_name: 'provider' 62 | parameter_value: 'srCostOptMemConfig' 63 | - section_name: 'ha_dr_provider_srCostOptMemConfig' 64 | parameter_name: 'path' 65 | parameter_value: '/hana/shared/srHook' 66 | - section_name: 'ha_dr_provider_srCostOptMemConfig' 67 | parameter_name: 'execution_order' 68 | parameter_value: '2' 69 | - database: SYSTEMDB 70 | - file_name: global.ini 71 | - layer: SYSTEM 72 | - reconfig: True 73 | - user_name: SYSTEM 74 | - user_password: {{ node.password }} 75 | - password: {{ node.password }} 76 | - sid: {{ node.sid }} 77 | - inst: {{ node.instance }} 78 | - require: 79 | - reduce_memory_resources_{{ node.host+node.sid }} 80 | - setup_srHook_directory 81 | - install_srCostOptMemConfig_hook 82 | {% endif %} 83 | {% endif %} 84 | {% endfor %} 85 | -------------------------------------------------------------------------------- /hana/enable_primary.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {% set host = grains['host'] %} 3 | 4 | {% for node in hana.nodes if node.host == host and node.primary is defined %} 5 | 6 | {{ node.primary.name }}: 7 | hana.sr_primary_enabled: 8 | - sid: {{ node.sid }} 9 | - inst: {{ node.instance }} 10 | - password: {{ node.password }} 11 | {% if node.primary.userkey is defined %} 12 | - userkey: 13 | - key_name: {{ node.primary.userkey.key_name }} 14 | - environment: {{ node.primary.userkey.environment }} 15 | - user_name: {{ node.primary.userkey.user_name }} 16 | - user_password: {{ node.primary.userkey.user_password }} 17 | - database: {{ node.primary.userkey.database }} 18 | {% endif %} 19 | {% if node.primary.backup is defined %} 20 | - backup: 21 | {% if node.primary.backup.key_name is defined %} 22 | - key_name: {{ node.primary.backup.key_name }} 23 | {% else %} 24 | - user_name: {{ node.primary.backup.user_name }} 25 | - user_password: {{ node.primary.backup.user_password }} 26 | {% endif %} 27 | - database: {{ node.primary.backup.database }} 28 | - file: {{ node.primary.backup.file }} 29 | {% endif %} 30 | - require: 31 | - hana_install_{{ node.host+node.sid }} 32 | 33 | {% endfor %} 34 | -------------------------------------------------------------------------------- /hana/enable_secondary.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {% set host = grains['host'] %} 3 | 4 | {% for node in hana.nodes if node.host == host and 
node.secondary is defined %} 5 | 6 | # The primary password is retrieved in this order 7 | # 1. If the primary node is defined in the pillar, the primary node's password will be used 8 | # 2. If secondary.primary_password is defined, this password will be used 9 | # 3. The secondary machine password will be used 10 | {% set password = {} %} 11 | 12 | {% for prim_node in hana.nodes if node.secondary.remote_host == prim_node.host and prim_node.primary is defined %} 13 | {% do password.update({'primary': prim_node.password }) %} 14 | {% endfor %} 15 | 16 | {% if password.primary is not defined and node.secondary.primary_password is defined %} 17 | {% do password.update({'primary': node.secondary.primary_password }) %} 18 | {% elif password.primary is not defined %} 19 | {% do password.update({'primary': node.password }) %} 20 | {% endif %} 21 | 22 | {{ node.secondary.name }}: 23 | hana.sr_secondary_registered: 24 | - sid: {{ node.sid }} 25 | - inst: {{ node.instance }} 26 | - password: {{ node.password }} 27 | - remote_host: {{ node.secondary.remote_host }} 28 | - remote_instance: {{ node.secondary.remote_instance }} 29 | - replication_mode: {{ node.secondary.replication_mode }} 30 | - operation_mode: {{ node.secondary.operation_mode }} 31 | - timeout: {{ node.secondary.primary_timeout|default(100) }} 32 | - interval: {{ node.secondary.interval|default(10) }} 33 | - primary_pass: {{ password.primary }} 34 | - require: 35 | - hana_install_{{ node.host+node.sid }} 36 | 37 | {% endfor %} 38 | -------------------------------------------------------------------------------- /hana/extract_hana_package.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | 3 | {%- if hana.hana_archive_file is defined %} 4 | {% set hana_package = hana.hana_archive_file %} 5 | {% set hana_extract_dir = hana.hana_extract_dir %} 6 | 7 | setup_hana_extract_directory: 8 | file.directory: 9 | - name: {{ hana_extract_dir }} 10 | - mode: 755 11 | - makedirs: True 12 | 13 | {%- if hana_package.endswith((".ZIP", ".zip", ".RAR", ".rar")) %} 14 | 15 | extract_hana_archive: 16 | archive.extracted: 17 | - name: {{ hana_extract_dir }} 18 | - enforce_toplevel: False 19 | - source: {{ hana_package }} 20 | 21 | {%- elif hana_package.endswith((".exe", ".EXE")) %} 22 | 23 | {% set unrar_package = 'unrar_wrapper' if grains['osrelease_info'][0] == 15 else 'unrar' %} 24 | install_unrar_package: 25 | pkg.installed: 26 | - name: {{ unrar_package }} 27 | 28 | # The unrar tool does not have an option to skip the top-level directory when extracting multipart exe archives 29 | extract_hana_multipart_archive: 30 | cmd.run: 31 | - name: unrar x {{ hana_package }} 32 | - cwd: {{ hana_extract_dir }} 33 | - require: 34 | - install_unrar_package 35 | 36 | {%- elif hana_package.endswith((".sar", ".SAR")) and hana.sapcar_exe_file is defined %} 37 | 38 | extract_hdbserver_sar_archive: 39 | sapcar.extracted: 40 | - name: {{ hana_package }} 41 | - sapcar_exe: {{ hana.sapcar_exe_file }} 42 | - output_dir: {{ hana_extract_dir }} 43 | - options: "-manifest SIGNATURE.SMF" 44 | 45 | copy_signature_file_to_installer_dir: 46 | file.copy: 47 | - source: {{ hana_extract_dir }}/SIGNATURE.SMF 48 | - name: {{ hana_extract_dir }}/SAP_HANA_DATABASE/SIGNATURE.SMF 49 | - preserve: True 50 | - force: True 51 | - require: 52 | - extract_hdbserver_sar_archive 53 | 54 | {%- endif %} 55 | {%- endif %} 56 | 57 | {%- if hana.hana_client_archive_file is defined and
hana.hana_client_archive_file.endswith((".sar", ".SAR")) and hana.sapcar_exe_file is defined %} 58 | extract_hana_client_sar_archive: 59 | sapcar.extracted: 60 | - name: {{ hana.hana_client_archive_file }} 61 | - sapcar_exe: {{ hana.sapcar_exe_file }} 62 | - output_dir: {{ hana.hana_client_extract_dir }} 63 | - options: "-manifest SIGNATURE.SMF" 64 | {% endif %} 65 | -------------------------------------------------------------------------------- /hana/ha_cluster.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {% set host = grains['host'] %} 3 | 4 | {% if hana.scale_out %} 5 | {% set hook_path = '/usr/share/SAPHanaSR-ScaleOut' %} 6 | 7 | remove_SAPHanaSR: 8 | pkg.removed: 9 | - pkgs: 10 | - SAPHanaSR 11 | - SAPHanaSR-doc 12 | 13 | install_SAPHanaSR: 14 | pkg.installed: 15 | - pkgs: 16 | - SAPHanaSR-ScaleOut 17 | - SAPHanaSR-ScaleOut-doc 18 | 19 | {% else %} 20 | {% set hook_path = '/usr/share/SAPHanaSR' %} 21 | 22 | remove_SAPHanaSR: 23 | pkg.removed: 24 | - pkgs: 25 | - SAPHanaSR-ScaleOut 26 | - SAPHanaSR-ScaleOut-doc 27 | 28 | install_SAPHanaSR: 29 | pkg.installed: 30 | - pkgs: 31 | - SAPHanaSR 32 | - SAPHanaSR-doc 33 | {% endif %} 34 | 35 | {% set sr_hook_multi_target = hook_path + '/SAPHanaSrMultiTarget.py' %} 36 | {% set sr_hook = hook_path + '/SAPHanaSR.py' %} 37 | {% set sustkover_hook = hook_path + '/susTkOver.py' %} 38 | {% set suschksrv_hook = hook_path + '/susChkSrv.py' %} 39 | 40 | {% set sustkover_hook_enabled = hana.ha_dr_sustkover_enabled|default(False) %} 41 | {% set suschksrv_hook_enabled = hana.ha_dr_suschksrv_enabled|default(False) %} 42 | {% set suschksrv_hook_action_on_lost = hana.ha_dr_suschksrv_action_on_lost|default('stop') %} 43 | 44 | # get HANA sites 45 | {% set sites = {} %} 46 | {% for node in hana.nodes %} 47 | {% if node.primary is defined %} 48 | {% do sites.update({'a': node.primary.name}) %} 49 | {% elif node.secondary is defined %} 50 | {% do sites.update({'b': node.secondary.name}) %} 51 | {% endif %} 52 | {% endfor %} 53 | 54 | {% for node in hana.nodes if node.host == host %} 55 | 56 | {% set instance = '{:0>2}'.format(node.instance) %} 57 | {% set sap_instance = '{}_{}'.format(node.sid, instance) %} 58 | 59 | # Update sudoers to allow crm operations to the sidadm 60 | {% set sudoers = '/etc/sudoers.d/SAPHanaSR' %} 61 | 62 | sudoers_create_{{ sap_instance }}: 63 | file.managed: 64 | - source: salt://hana/templates/ha_cluster_sudoers.j2 65 | - name: {{ sudoers }} 66 | - template: jinja 67 | - user: root 68 | - group: root 69 | - mode: 0440 70 | - check_cmd: /usr/sbin/visudo -c -f 71 | - require: 72 | - pkg: install_SAPHanaSR 73 | - context: 74 | sid: {{ node.sid }} 75 | sites: {{ sites }} 76 | sr_hook: {{ sr_hook }} 77 | sr_hook_multi_target: {{ sr_hook_multi_target }} 78 | sr_hook_string: __slot__:salt:file.grep({{ sr_hook }}, "^srHookGen = ").stdout 79 | sustkover_hook: {{ sustkover_hook }} 80 | 81 | # remove old entries from /etc/sudoers (migration to new /etc/sudoers.d/SAPHanaSR file) 82 | sudoers_remove_old_entries_{{ sap_instance }}_srHook: 83 | file.replace: 84 | - name: /etc/sudoers 85 | - pattern: '.*({{ node.sid.lower() }}(adm|_(glob|site)).*(SOK|srHook)|SAPHanaSR.*needs).*' 86 | - repl: '' 87 | 88 | # Add SAPHANASR hook 89 | 90 | # Only add hook if hana was installed (not on scale-out standby/workers). A restart is needed as secondary cannot register a new hook without this (e.g. via hdbsql). 
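# As an illustration only (an assumed rendering, not an additional state): with the non-scale-out default hook_path of /usr/share/SAPHanaSR, the configure_ha_hook_*_multi_target state below writes roughly the following into global.ini:
#   [ha_dr_provider_SAPHanaSrMultiTarget]
#   provider = SAPHanaSrMultiTarget
#   path = /usr/share/SAPHanaSR
#   execution_order = 1
#   [trace]
#   ha_dr_saphanasrmultitarget = info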
91 | {% if node.install is defined %} 92 | configure_ha_hook_{{ sap_instance }}_multi_target: 93 | ini.options_present: 94 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 95 | - separator: '=' 96 | - strict: False # do not touch rest of file 97 | - sections: 98 | ha_dr_provider_SAPHanaSrMultiTarget: 99 | provider: 'SAPHanaSrMultiTarget' 100 | path: '{{ hook_path }}' 101 | execution_order: '1' 102 | trace: 103 | ha_dr_saphanasrmultitarget: 'info' 104 | - require: 105 | - pkg: install_SAPHanaSR 106 | - onlyif: 107 | - test -f {{ sr_hook_multi_target }} 108 | 109 | configure_ha_hook_{{ sap_instance }}: 110 | ini.options_present: 111 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 112 | - separator: '=' 113 | - strict: False # do not touch rest of file 114 | - sections: 115 | ha_dr_provider_SAPHanaSR: 116 | provider: 'SAPHanaSR' 117 | path: '{{ hook_path }}' 118 | execution_order: '1' 119 | trace: 120 | ha_dr_saphanasr: 'info' 121 | - require: 122 | - pkg: install_SAPHanaSR 123 | - unless: 124 | - test -f {{ sr_hook_multi_target }} 125 | 126 | remove_wrong_ha_hook_{{ sap_instance }}_sections_multi_target: 127 | ini.sections_absent: 128 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 129 | - separator: '=' 130 | - sections: 131 | ha_dr_provider_SAPHanaSR: 132 | - require: 133 | - pkg: install_SAPHanaSR 134 | - onlyif: 135 | - test -f {{ sr_hook_multi_target }} 136 | 137 | remove_wrong_ha_hook_{{ sap_instance }}_options_multi_target: 138 | ini.options_absent: 139 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 140 | - separator: '=' 141 | - sections: 142 | trace: 143 | - ha_dr_saphanasr 144 | - require: 145 | - pkg: install_SAPHanaSR 146 | - onlyif: 147 | - test -f {{ sr_hook_multi_target }} 148 | 149 | remove_wrong_ha_hook_{{ sap_instance }}_sections: 150 | ini.sections_absent: 151 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 152 | - separator: '=' 153 | - sections: 154 | ha_dr_provider_SAPHanaSrMultiTarget: 155 | - require: 156 | - pkg: install_SAPHanaSR 157 | - unless: 158 | - test -f {{ sr_hook_multi_target }} 159 | 160 | remove_wrong_ha_hook_{{ sap_instance }}_options: 161 | ini.options_absent: 162 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 163 | - separator: '=' 164 | - sections: 165 | trace: 166 | - ha_dr_saphanasrmultitarget 167 | - require: 168 | - pkg: install_SAPHanaSR 169 | - unless: 170 | - test -f {{ sr_hook_multi_target }} 171 | 172 | configure_susTkOver_hook_{{ sap_instance }}: 173 | ini.options_present: 174 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 175 | - separator: '=' 176 | - strict: False # do not touch rest of file 177 | - sections: 178 | ha_dr_provider_sustkover: 179 | provider: 'susTkOver' 180 | path: '{{ hook_path }}' 181 | execution_order: '2' 182 | trace: 183 | ha_dr_sustkover: 'info' 184 | - require: 185 | - pkg: install_SAPHanaSR 186 | - onlyif: 187 | - test -f {{ sustkover_hook }} 188 | - test "True" == "{{ sustkover_hook_enabled }}" 189 | 190 | configure_susChkSrv_hook_{{ sap_instance }}: 191 | ini.options_present: 192 | - name: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 193 | - separator: '=' 194 | - strict: False # do not touch rest of file 195 | - sections: 196 | ha_dr_provider_suschksrv: 197 | provider: 'susChkSrv' 198 | path: '{{ hook_path }}' 199 | execution_order: '3' 200 | 
action_on_lost: '{{ suschksrv_hook_action_on_lost }}' 201 | trace: 202 | ha_dr_suschksrv: 'info' 203 | - require: 204 | - pkg: install_SAPHanaSR 205 | - onlyif: 206 | - test -f {{ suschksrv_hook }} 207 | - test "True" == "{{ suschksrv_hook_enabled }}" 208 | 209 | # Configure system replication operation mode in the primary site 210 | {% for secondary_node in hana.nodes if node.primary is defined and secondary_node.secondary is defined and secondary_node.secondary.remote_host == host %} 211 | configure_replication_{{ sap_instance }}: 212 | module.run: 213 | - hana.set_ini_parameter: 214 | - ini_parameter_values: 215 | - section_name: 'system_replication' 216 | parameter_name: 'operation_mode' 217 | parameter_value: '{{ secondary_node.secondary.operation_mode }}' 218 | - database: SYSTEMDB 219 | - file_name: global.ini 220 | - layer: SYSTEM 221 | - reconfig: True 222 | - user_name: SYSTEM 223 | - user_password: {{ node.password }} 224 | - password: {{ node.password }} 225 | - sid: {{ node.sid }} 226 | - inst: {{ node.instance }} 227 | {% endfor %} 228 | 229 | # Stop SAP HANA - Only needed if global.ini was edited directly (removed old hooks). 230 | stop_hana_{{ sap_instance }}: 231 | module.run: 232 | - hana.stop: 233 | - sid: {{ node.sid }} 234 | - inst: {{ node.instance }} 235 | - password: {{ node.password }} 236 | - require: 237 | - hana_install_{{ node.host+node.sid }} 238 | - onchanges: 239 | - ini: /hana/shared/{{ node.sid.upper() }}/global/hdb/custom/config/global.ini 240 | 241 | # Start SAP HANA 242 | start_hana_{{ sap_instance }}: 243 | module.run: 244 | - hana.start: 245 | - sid: {{ node.sid }} 246 | - inst: {{ node.instance }} 247 | - password: {{ node.password }} 248 | - require: 249 | - hana_install_{{ node.host+node.sid }} 250 | {%- endif %} 251 | 252 | {% endfor %} 253 | -------------------------------------------------------------------------------- /hana/init.sls: -------------------------------------------------------------------------------- 1 | {% from "hana/map.jinja" import hana with context %} 2 | 3 | include: 4 | {% if hana.install_packages is sameas true %} 5 | - hana.packages 6 | {% endif %} 7 | - hana.pre_validation 8 | - hana.saptune 9 | - hana.extract_hana_package 10 | - hana.install 11 | - hana.enable_primary 12 | - hana.enable_secondary 13 | {% if hana.ha_enabled %} 14 | - hana.ha_cluster 15 | {% endif %} 16 | {% if hana.monitoring_enabled %} 17 | - hana.monitoring 18 | {% endif %} 19 | -------------------------------------------------------------------------------- /hana/install.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {%- from 'hana/macros/get_hana_exe_extract_dir.sls' import get_hana_exe_extract_dir with context %} 3 | {% set host = grains['host'] %} 4 | {% set hana_extract_dir = get_hana_exe_extract_dir(hana) %} 5 | 6 | include: 7 | - .enable_cost_optimized 8 | 9 | {% for node in hana.nodes if node.host == host %} 10 | {% if node.install is defined %} 11 | 12 | {% set instance = '{:0>2}'.format(node.instance) %} 13 | {% set sap_instance = '{}_{}'.format(node.sid, instance) %} 14 | {% if node.install.extra_parameters is defined and node.install.extra_parameters|length > 0 %} 15 | {% set extra_parameters = True %} 16 | {% set extra_parameters_items = node.install.extra_parameters.items() %} 17 | {% else %} 18 | {% set extra_parameters = False %} 19 | {% set extra_parameters_items = [] %} 20 | {% endif %} 21 | 22 | hana_install_{{ node.host+node.sid }}:
23 | hana.installed: 24 | - name: {{ node.sid }} 25 | - inst: {{ node.instance }} 26 | - password: {{ node.password }} 27 | - software_path: {{ node.install.software_path|default(hana.software_path)|default(hana_extract_dir) }} 28 | - root_user: {{ node.install.root_user }} 29 | - root_password: {{ node.install.root_password }} 30 | {% if node.install.config_file is defined %} 31 | - config_file: {{ node.install.config_file }} 32 | {% endif %} 33 | {% if node.install.hdb_pwd_file is defined %} 34 | - hdb_pwd_file: {{ node.install.hdb_pwd_file }} 35 | {% else %} 36 | - system_user_password: {{ node.install.system_user_password }} 37 | - sapadm_password: {{ node.install.sapadm_password }} 38 | {% endif %} 39 | - extra_parameters: 40 | - hostname: {{ node.host }} 41 | {% if extra_parameters %} 42 | {% for key,value in extra_parameters_items %} 43 | {% if key != 'addhosts' %} # exclude addhosts (scale-out) 44 | - {{ key }}: {{ value }} 45 | {% endif %} 46 | {% endfor %} 47 | {% endif %} 48 | # needed to utilize pwd file for add_hosts 49 | - remove_pwd_files: False 50 | 51 | # scale-out specific 52 | {% for key,value in extra_parameters_items %} 53 | {% if key == 'addhosts' %} 54 | 55 | # SAP Note 2080991 56 | {% if not hana.basepath_shared|default(True) %} 57 | disable_basepath_shared_{{ sap_instance }}: 58 | module.run: 59 | - hana.set_ini_parameter: 60 | - ini_parameter_values: 61 | - section_name: 'persistence' 62 | parameter_name: 'basepath_shared' 63 | parameter_value: 'no' 64 | - database: SYSTEMDB 65 | - file_name: global.ini 66 | - layer: SYSTEM 67 | - reconfig: True 68 | - user_name: SYSTEM 69 | - user_password: {{ node.password }} 70 | - password: {{ node.password }} 71 | - sid: {{ node.sid }} 72 | - inst: {{ node.instance }} 73 | {% endif %} 74 | 75 | # add scale-out nodes 76 | hana_add_hosts_{{ node.host+node.sid }}: 77 | module.run: 78 | - hana.add_hosts: 79 | - add_hosts: {{ value }} 80 | - hdblcm_folder: /hana/shared/{{ node.sid.upper() }}/hdblcm 81 | - root_user: {{ node.install.root_user }} 82 | - root_password: {{ node.install.root_password }} 83 | - hdb_pwd_file: /root/hdb_passwords.xml 84 | # only run after initial install (password file still exists) 85 | - onlyif: 86 | - test -f /root/hdb_passwords.xml 87 | 88 | {% endif %} 89 | {% endfor %} 90 | 91 | # see "remove_pwd_files: False" above 92 | hana_add_hosts_pwd_file_remove_{{ node.host+node.sid }}: 93 | file.absent: 94 | - name: /root/hdb_passwords.xml 95 | 96 | {% else %} # node.install not defined 97 | # make sure /hana/{data,log}/${SID} exists on nodes where install does not run 98 | 99 | create_hana_data_{{ node.sid.upper() }}: 100 | file.directory: 101 | - name: /hana/data/{{ node.sid.upper() }} 102 | # - user: {{ node.sid.lower() }}adm # user might not exist yet 103 | # - group: sapsys # group might not exist yet 104 | - mode: 750 105 | - makedirs: True 106 | 107 | create_hana_log_{{ node.sid.upper() }}: 108 | file.directory: 109 | - name: /hana/log/{{ node.sid.upper() }} 110 | # - user: {{ node.sid.lower() }}adm # user might not exist yet 111 | # - group: sapsys # group might not exist yet 112 | - mode: 750 113 | - makedirs: True 114 | 115 | {% endif %} 116 | {% endfor %} 117 | -------------------------------------------------------------------------------- /hana/macros/get_hana_client_path.sls: -------------------------------------------------------------------------------- 1 | {% macro get_hana_client_path(hana, node) -%} 2 | {%- from 'hana/macros/get_hana_exe_extract_dir.sls' import get_hana_exe_extract_dir 
with context %} 3 | {#- If hana archive used for installation is sar format, it will not contain the hana client, so we need to provide a hana client #} 4 | {#- One of the following paths is used for hana client based on pillar entries: 1. hana_client_software_path 2. hana_client_extract_dir 3. hana_extract_dir #} 5 | {%- if hana.hana_client_software_path is defined %} 6 | {%- set hana_client_path = hana.hana_client_software_path %} 7 | {%- elif hana.hana_client_archive_file is defined %} 8 | {%- set hana_client_path = hana.hana_client_extract_dir %} 9 | {%- elif hana.hana_archive_file is defined %} 10 | {%- set hana_client_path = get_hana_exe_extract_dir(hana) %} 11 | {%- else %} 12 | {%- if node.install is defined %} 13 | {%- set hana_client_path = node.install.software_path|default(hana.software_path) %} 14 | {%- else %} 15 | {%- set hana_client_path = hana.software_path %} 16 | {%- endif %} 17 | {%- endif %} 18 | {{- hana_client_path }} 19 | {%- endmacro %} 20 | -------------------------------------------------------------------------------- /hana/macros/get_hana_exe_extract_dir.sls: -------------------------------------------------------------------------------- 1 | {% macro get_hana_exe_extract_dir(hana) -%} 2 | {%- set hana_extract_dir = hana.hana_extract_dir %} 3 | {#- Below is temporary workaround to update the software installation path when using unrar for HANA multipart rar archive#} 4 | {#- TODO: Find better solution to set or detect the correct extraction path when extracting multipart rar archive#} 5 | {#- Below logic finds the extraction location based on name of multipart exe archive filename#} 6 | {%- if hana.hana_archive_file is defined and hana.hana_archive_file.endswith((".exe", ".EXE")) %} 7 | {%- set archive_base_name = salt['file.basename']( hana.hana_archive_file.split('.')[0]) %} 8 | {%- set archive_name = archive_base_name.split('_')[0] %} 9 | {%- set hana_extract_dir = hana_extract_dir| path_join(archive_name) %} 10 | {%- endif %} 11 | {{- hana_extract_dir }} 12 | {%- endmacro %} -------------------------------------------------------------------------------- /hana/map.jinja: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # vim: ft=jinja 3 | 4 | {% import_yaml "hana/defaults.yaml" as defaults %} 5 | {% set hana = salt['pillar.get']('hana', default=defaults.hana, merge=True) %} 6 | -------------------------------------------------------------------------------- /hana/monitoring.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {%- from 'hana/macros/get_hana_client_path.sls' import get_hana_client_path with context %} 3 | 4 | {% for node in hana.nodes if node.host == grains['host'] %} 5 | 6 | {%- set pydbapi_output_dir = '/tmp/pydbapi' %} 7 | {%- set hana_client_path = get_hana_client_path(hana, node ) %} 8 | 9 | # only run if exporter is defined 10 | {% if node.exporter is defined and node.instance is defined and node.sid is defined %} 11 | {% set exporter = node.exporter|default(None) %} 12 | 13 | {% set sap_instance_nr = '{:0>2}'.format(node.instance) %} 14 | {% set exporter_instance = '{}_HDB{}'.format(node.sid.upper(), sap_instance_nr) %} 15 | 16 | install_python_pip: 17 | pkg.installed: 18 | - name: python3-pip 19 | - retry: 20 | attempts: 3 21 | interval: 15 22 | - resolve_capabilities: true 23 | 24 | extract_pydbapi_client: 25 | hana.pydbapi_extracted: 26 | - name: PYDBAPI.TGZ 27 | - 
software_folders: [{{ hana_client_path }}] 28 | - output_dir: {{ pydbapi_output_dir }} 29 | - hana_version: '20' 30 | - force: true 31 | 32 | # pip.installed cannot manage file names with regular expressions 33 | # TODO: Improve this to use pip.installed somehow 34 | install_pydbapi_client: 35 | cmd.run: 36 | - name: /usr/bin/python3 -m pip install {{ pydbapi_output_dir }}/hdbcli-*.tar.gz 37 | - require: 38 | - install_python_pip 39 | - extract_pydbapi_client 40 | 41 | prometheus-hanadb_exporter: 42 | pkg.installed: 43 | - retry: 44 | attempts: 3 45 | interval: 15 46 | - require: 47 | - install_pydbapi_client 48 | 49 | hanadb_exporter_configuration_{{ exporter_instance }}: 50 | file.managed: 51 | - source: salt://hana/templates/hanadb_exporter.j2 52 | - name: /usr/etc/hanadb_exporter/{{ exporter_instance }}.json 53 | - template: jinja 54 | - mode: '0700' 55 | - require: 56 | - prometheus-hanadb_exporter 57 | - context: 58 | exporter: {{ exporter|yaml }} 59 | sap_instance_nr: "{{ sap_instance_nr }}" 60 | exporter_instance: {{ exporter_instance }} 61 | 62 | {% if hana.ha_enabled %} 63 | {% set service_status = "disabled" %} 64 | {% set service_enabled = False %} 65 | {% else %} 66 | {% set service_status = "running" %} 67 | {% set service_enabled = True %} 68 | {% endif %} 69 | 70 | hanadb_exporter_service_{{ exporter_instance }}: 71 | service.{{ service_status }}: 72 | - name: prometheus-hanadb_exporter@{{ exporter_instance }} 73 | - enable: {{ service_enabled }} 74 | {% if service_enabled %} 75 | - watch: 76 | - file: hanadb_exporter_configuration_{{ exporter_instance }} 77 | {% endif %} 78 | 79 | {% endif %} 80 | {% endfor %} 81 | -------------------------------------------------------------------------------- /hana/packages.sls: -------------------------------------------------------------------------------- 1 | #required packages to install SAP HANA 2 | 3 | {% set pattern_available = 1 %} 4 | {% if grains['os_family'] == 'Suse' %} 5 | {% set pattern_available = salt['cmd.retcode']('zypper search patterns-sap-hana') %} 6 | {% endif %} 7 | 8 | {% if pattern_available == 0 %} 9 | # refresh is disabled to avoid errors during the call 10 | {% set repo = salt['pkg.info_available']('patterns-sap-hana', refresh=False)['patterns-sap-hana']['repository'] %} 11 | patterns-sap-hana: 12 | pkg.installed: 13 | - fromrepo: {{ repo }} 14 | - retry: 15 | attempts: 3 16 | interval: 15 17 | # SAPHanaSR-ScaleOut conflicts with patterns-sap-hana and will be uninstalled (which will affect a running cluster) 18 | - unless: rpm -q SAPHanaSR-ScaleOut 19 | 20 | {% else %} 21 | install_required_packages: 22 | pkg.installed: 23 | - retry: 24 | attempts: 3 25 | interval: 15 26 | - pkgs: 27 | - libnuma1 28 | - libltdl7 29 | 30 | {% endif %} 31 | 32 | # Install shaptools depending on the os and python version 33 | {% if grains['pythonversion'][0] == 2 %} 34 | python-shaptools: 35 | {% else %} 36 | python3-shaptools: 37 | {% endif %} 38 | pkg.installed: 39 | - retry: 40 | attempts: 3 41 | interval: 15 42 | - resolve_capabilities: true 43 | -------------------------------------------------------------------------------- /hana/pre_validation.sls: -------------------------------------------------------------------------------- 1 | {% from "hana/map.jinja" import hana with context -%} 2 | 3 | {% set host = grains['host'] %} 4 | 5 | {# Check HANA archive media checkbox #} 6 | {% if hana.use_hana_archive_file is defined and hana.use_hana_archive_file == false %} 7 | {% do hana.pop('hana_archive_file', none) %} 8 | {% endif 
%} 9 | 10 | {% for node in hana.nodes if node.host == host %} 11 | 12 | {# Check HANA install checkbox #} 13 | {% if node.install_checkbox is defined and node.install_checkbox == false %} 14 | 15 | {% do node.pop('install') %} 16 | 17 | {% elif node.install_checkbox is defined and node.install_checkbox == true %} 18 | {% if node.install.use_config_file == false %} 19 | {% do node.install.pop('config_file') %} 20 | {% endif %} 21 | 22 | {% if node.install.local_software_path_checkbox == false %} 23 | {% do node.install.pop('software_path') %} 24 | {% endif %} 25 | 26 | {% if node.install.use_hdb_pwd_file == false %} 27 | {% do node.install.pop('hdb_pwd_file') %} 28 | {% endif %} 29 | 30 | {% if node.install.extra_parameters is defined and node.install.extra_parameters|length > 0 and node.install.extra_parameters is not mapping %} 31 | {% set new_extra_parameters = {} %} 32 | {% for new_item in node.install.extra_parameters %} 33 | {% do new_extra_parameters.update({new_item.key: new_item.value}) %} 34 | {% endfor %} 35 | {% do node.install.update({'extra_parameters': new_extra_parameters}) %} 36 | {% endif %} 37 | {% endif %} 38 | {# Check HANA install checkbox finish #} 39 | 40 | {# Check HANA Scenario type #} 41 | {% if node.scenario_type is defined and node.scenario_type != "cost-optimized" %} 42 | {% do node.pop('cost_optimized_parameters') %} 43 | {% endif %} 44 | {# Check HANA Scenario type finish #} 45 | 46 | {# Check HANA System replication mode #} 47 | {% if node.system_replication is defined %} 48 | {% if node.system_replication.system_replication_options is defined and node.system_replication.system_replication_options != "Secondary" %} 49 | {% do node.pop('secondary') %} 50 | {% endif %} 51 | 52 | {% if node.system_replication.system_replication_options is defined and node.system_replication.system_replication_options != "Primary" %} 53 | {% do node.pop('primary') %} 54 | {% else %} 55 | {% if node.primary.create_backup == false %} 56 | {% do node.primary.pop('backup') %} 57 | {% endif %} 58 | {% if node.primary.create_userkey == false %} 59 | {% do node.primary.pop('userkey') %} 60 | {% endif %} 61 | 62 | {% endif %} 63 | {% endif %} 64 | {# Check HANA System replication mode finish #} 65 | 66 | {# Check HANA exporter #} 67 | {% if node.add_exporter is defined and node.add_exporter == false %} 68 | {% do node.pop('exporter') %} 69 | {% endif %} 70 | {# Check HANA exporter finish #} 71 | 72 | {% endfor %} 73 | -------------------------------------------------------------------------------- /hana/saptune.sls: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {% set host = grains['host'] %} 3 | 4 | {% for node in hana.nodes if node.host == host and (hana.saptune_solution is defined or node.saptune_solution is defined) %} 5 | 6 | {% set saptune_solution = node.saptune_solution|default(hana.saptune_solution) %} 7 | {% set instance = '{:0>2}'.format(node.instance) %} 8 | {% set name = '{}_{}'.format(node.sid, instance) %} 9 | 10 | apply_saptune_solution_{{ host }}_{{ name }}: 11 | saptune.solution_applied: 12 | - name: {{ saptune_solution }} 13 | 14 | # Start the saptune systemd service to ensure the system is well-tuned after a system reboot 15 | start_saptune_service_{{ host }}_{{ name }}: 16 | cmd.run: 17 | - name: saptune daemon start 18 | 19 | {% endfor %} 20 | -------------------------------------------------------------------------------- /hana/templates/cluster_resources.j2: 
-------------------------------------------------------------------------------- 1 | {%- set data = pillar.cluster.configure.template.parameters %} 2 | {%- set sid = data.sid.upper() %} 3 | {%- set instance = '{:0>2}'.format(data.instance) %} 4 | {%- set cloud_provider = grains['cloud_provider'] %} 5 | {%- set monitoring_enabled = pillar.cluster.monitoring_enabled|default(False) %} 6 | 7 | {%- set scale_out = data.scale_out|default(False)%} 8 | {%- set majority_maker = data.majority_maker|default("")%} 9 | {%- if scale_out %} 10 | {%- set SAPHanaResource = "SAPHanaController" %} 11 | {%- else %} 12 | {%- set SAPHanaResource = "SAPHana" %} 13 | {%- endif %} 14 | 15 | {%- if cloud_provider == "amazon-web-services" %} 16 | {%- set native_fencing = data.native_fencing|default(True) %} 17 | {%- set vip_mechanism = data.virtual_ip_mechanism|default("route") %} 18 | {%- elif cloud_provider == "google-cloud-platform" %} 19 | {%- set native_fencing = data.native_fencing|default(True) %} 20 | {%- set vip_mechanism = data.virtual_ip_mechanism|default("load-balancer") %} 21 | {%- elif cloud_provider == "microsoft-azure" %} 22 | {%- set native_fencing = data.native_fencing|default(False) %} 23 | {%- set vip_mechanism = data.virtual_ip_mechanism|default("load-balancer") %} 24 | {%- else %}{# all other cases like openstack and libvirt #} 25 | {%- set native_fencing = data.native_fencing|default(False) %} 26 | {%- set vip_mechanism = data.virtual_ip_mechanism|default("vip-only") %} 27 | {%- endif %} 28 | {%- set cidr_netmask = "cidr_netmask="~data.virtual_ip_mask|default("32") %} 29 | {%- set nic = "nic="~pillar.cluster.interface|json if pillar.cluster.interface is defined else "" %} 30 | {%- set interface = "interface="~pillar.cluster.interface|default('eth0')|json %} 31 | 32 | ########### 33 | # Defaults 34 | ########### 35 | 36 | rsc_defaults \ 37 | resource-stickiness="1000" \ 38 | {%- if scale_out %} 39 | migration-threshold="50" 40 | {%- else %} 41 | migration-threshold="5000" 42 | {%- endif %} 43 | 44 | op_defaults \ 45 | timeout="600" 46 | 47 | ##################################################### 48 | # Fencing agents - Native agents for cloud providers 49 | ##################################################### 50 | 51 | {%- if native_fencing %} 52 | {%- if cloud_provider == "amazon-web-services" %} 53 | property $id="cib-bootstrap-options" \ 54 | stonith-enabled="true" \ 55 | stonith-action="off" \ 56 | stonith-timeout="150s" 57 | 58 | primitive rsc_aws_stonith_{{ sid }}_HDB{{ instance }} stonith:external/ec2 \ 59 | params tag={{ data.instance_tag }} profile={{ data.cluster_profile }} \ 60 | op start interval=0 timeout=180 \ 61 | op stop interval=0 timeout=180 \ 62 | op monitor interval=120 timeout=60 \ 63 | meta target-role=Started 64 | 65 | {%- elif cloud_provider == "google-cloud-platform" %} 66 | 67 | property $id="cib-bootstrap-options" \ 68 | stonith-enabled="true" \ 69 | stonith-timeout="150s" 70 | 71 | # This stonith resource and location will be duplicated for each node in the cluster 72 | primitive rsc_gcp_stonith_{{ sid }}_HDB{{ instance }}_{{ grains['host'] }} stonith:fence_gce \ 73 | params plug={{ grains['gcp_instance_name'] }} pcmk_host_map="{{ grains['host'] }}:{{ grains['gcp_instance_name'] }}" \ 74 | meta target-role=Started 75 | location loc_gcp_stonith_{{ sid }}_HDB{{ instance }}_{{ grains['host'] }} rsc_gcp_stonith_{{ sid }}_HDB{{ instance }}_{{ grains['host'] }} -inf: {{ grains['host'] }} 76 | 77 | {%- elif cloud_provider == "microsoft-azure" %} 78 | property 
$id="cib-bootstrap-options" \ 79 | stonith-enabled="true" \ 80 | concurrent-fencing=true 81 | 82 | primitive rsc_azure_stonith_{{ sid }}_HDB{{ instance }} stonith:fence_azure_arm \ 83 | params subscriptionId={{ data.azure_subscription_id }} resourceGroup={{ data.azure_resource_group_name }} tenantId={{ data.azure_tenant_id }} login={{ data.azure_fence_agent_app_id }} passwd="{{ data.azure_fence_agent_client_secret }}" pcmk_monitor_retries=4 pcmk_action_limit=3 power_timeout=240 pcmk_reboot_timeout=900 \ 84 | op monitor interval=3600 timeout=120 \ 85 | meta target-role=Started 86 | 87 | {%- endif %} 88 | {%- endif %} 89 | 90 | ###################################### 91 | # Floating IP address resource agents 92 | ###################################### 93 | 94 | {%- if cloud_provider == "amazon-web-services" %} 95 | 96 | {%- if vip_mechanism == "route" %} 97 | primitive rsc_ip_{{ sid }}_HDB{{ instance }} ocf:suse:aws-vpc-move-ip \ 98 | params ip={{ data.virtual_ip }} routing_table={{ data.route_table }} \ 99 | {{ interface }} profile={{ data.cluster_profile }} \ 100 | op start interval=0 timeout=180 \ 101 | op stop interval=0 timeout=180 \ 102 | op monitor interval=60 timeout=60 103 | 104 | {%- if data.virtual_ip_secondary is defined %} 105 | primitive rsc_ip_{{ sid }}_HDB{{ instance }}_readenabled ocf:suse:aws-vpc-move-ip \ 106 | params ip={{ data.virtual_ip_secondary }} routing_table={{ data.route_table }} \ 107 | {{ interface }} profile={{ data.cluster_profile }} \ 108 | op start interval=0 timeout=180 \ 109 | op stop interval=0 timeout=180 \ 110 | op monitor interval=60 timeout=60 111 | {%- endif %} 112 | {%- endif %} 113 | 114 | {%- elif cloud_provider == "google-cloud-platform" %} 115 | 116 | {%- if vip_mechanism == "load-balancer" %} 117 | primitive rsc_socat_{{ sid }}_HDB{{ instance }} anything \ 118 | params binfile="/usr/bin/socat" \ 119 | cmdline_options="-U TCP-LISTEN:625{{ instance }},backlog=10,fork,reuseaddr /dev/null" \ 120 | op monitor timeout=20s interval=10 \ 121 | op_params depth=0 122 | 123 | {%- if data.virtual_ip_secondary is defined %} 124 | primitive rsc_socat_{{ sid }}_HDB{{ instance }}_readenabled anything \ 125 | params binfile="/usr/bin/socat" \ 126 | cmdline_options="-U TCP-LISTEN:626{{ instance }},backlog=10,fork,reuseaddr /dev/null" \ 127 | op monitor timeout=20s interval=10 \ 128 | op_params depth=0 129 | {%- endif %} 130 | 131 | {%- elif vip_mechanism == "route" %} 132 | primitive rsc_ip_{{ sid }}_HDB{{ instance }} ocf:heartbeat:gcp-vpc-move-route \ 133 | params ip={{ data.virtual_ip }} vpc_network={{ data.vpc_network_name }} route_name={{ data.route_name }} \ 134 | op start interval=0 timeout=180 \ 135 | op stop interval=0 timeout=180 \ 136 | op monitor interval=60 timeout=60 137 | 138 | {%- if data.virtual_ip_secondary is defined %} 139 | primitive rsc_ip_{{ sid }}_HDB{{ instance }}_readenabled ocf:heartbeat:gcp-vpc-move-route \ 140 | params ip={{ data.virtual_ip_secondary }} vpc_network={{ data.vpc_network_name }} route_name={{ data.route_name_secondary }} \ 141 | op start interval=0 timeout=180 \ 142 | op stop interval=0 timeout=180 \ 143 | op monitor interval=60 timeout=60 144 | {%- endif %} 145 | {%- endif %} 146 | 147 | {%- elif cloud_provider == "microsoft-azure" %} 148 | 149 | {%- if vip_mechanism == "load-balancer" %} 150 | primitive rsc_socat_{{ sid }}_HDB{{ instance }} azure-lb \ 151 | params port=625{{ instance }} \ 152 | op monitor timeout="20" interval="10" depth="0" \ 153 | meta resource-stickiness=0 154 | 155 | {%- if 
data.virtual_ip_secondary is defined %} 156 | primitive rsc_socat_{{ sid }}_HDB{{ instance }}_readenabled azure-lb \ 157 | params port=626{{ instance }} \ 158 | op monitor timeout="20" interval="10" depth="0" 159 | {%- endif %} 160 | 161 | {%- endif %} 162 | 163 | {%- endif %} 164 | 165 | {%- if vip_mechanism == "vip-only" or vip_mechanism == "load-balancer" %} 166 | primitive rsc_ip_{{ sid }}_HDB{{ instance }} ocf:heartbeat:IPaddr2 \ 167 | params ip={{ data.virtual_ip }} {{ cidr_netmask }} {{ nic }} \ 168 | op monitor interval=10s timeout=20s 169 | {%- endif %} 170 | 171 | {%- if vip_mechanism == "vip-only" or vip_mechanism == "route"%} 172 | colocation col_saphana_ip_{{ sid }}_HDB{{ instance }} 2000: rsc_ip_{{ sid }}_HDB{{ instance }}:Started msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }}:Master 173 | 174 | {%- elif vip_mechanism == "load-balancer" %} 175 | group g_ip_{{ sid }}_HDB{{ instance }} rsc_ip_{{ sid }}_HDB{{ instance }} rsc_socat_{{ sid }}_HDB{{ instance }} 176 | colocation col_saphana_ip_{{ sid }}_HDB{{ instance }} 4000: g_ip_{{ sid }}_HDB{{ instance }}:Started msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }}:Master 177 | {%- endif %} 178 | 179 | {%- if data.virtual_ip_secondary is defined %} 180 | {%- if vip_mechanism == "vip-only" or vip_mechanism == "load-balancer" %} 181 | primitive rsc_ip_{{ sid }}_HDB{{ instance }}_readenabled ocf:heartbeat:IPaddr2 \ 182 | params ip={{ data.virtual_ip_secondary }} {{ cidr_netmask }} {{ nic }} \ 183 | op monitor interval=10s timeout=20s 184 | {%- endif %} 185 | 186 | {%- if vip_mechanism == "vip-only" or vip_mechanism == "route"%} 187 | colocation col_saphana_ip_{{ sid }}_HDB{{ instance }}_readenabled 2000: rsc_ip_{{ sid }}_HDB{{ instance }}_readenabled:Started msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }}:Slave 188 | 189 | {%- elif vip_mechanism == "load-balancer" %} 190 | group g_ip_{{ sid }}_HDB{{ instance }}_readenabled rsc_ip_{{ sid }}_HDB{{ instance }}_readenabled rsc_socat_{{ sid }}_HDB{{ instance }}_readenabled 191 | colocation col_saphana_ip_{{ sid }}_HDB{{ instance }}_readenabled 4000: g_ip_{{ sid }}_HDB{{ instance }}_readenabled:Started msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }}:Slave 192 | {%- endif %} 193 | 194 | {%- endif %} 195 | 196 | ##################### 197 | # SAP HANA resources 198 | ##################### 199 | 200 | primitive rsc_SAPHanaTopology_{{ sid }}_HDB{{ instance }} ocf:suse:SAPHanaTopology \ 201 | params \ 202 | SID="{{ sid }}" \ 203 | InstanceNumber="{{ instance }}" \ 204 | op monitor interval="10" timeout="600" \ 205 | op start interval="0" timeout="600" \ 206 | op stop interval="0" timeout="300" 207 | 208 | clone cln_SAPHanaTopology_{{ sid }}_HDB{{ instance }} rsc_SAPHanaTopology_{{ sid }}_HDB{{ instance }} \ 209 | meta is-managed="true" clone-node-max="1" interleave="true" 210 | 211 | primitive rsc_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }} ocf:suse:{{ SAPHanaResource }} \ 212 | params \ 213 | SID="{{ sid }}" \ 214 | InstanceNumber="{{ instance }}" \ 215 | PREFER_SITE_TAKEOVER="{{ data.prefer_takeover }}" \ 216 | AUTOMATED_REGISTER="{{ data.auto_register }}" \ 217 | DUPLICATE_PRIMARY_TIMEOUT="7200" \ 218 | op start interval="0" timeout="3600" \ 219 | op stop interval="0" timeout="3600" \ 220 | op promote interval="0" timeout="3600" \ 221 | op monitor interval="60" role="Master" timeout="700" \ 222 | op monitor interval="61" role="Slave" timeout="700" 223 | 224 | ms msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }} rsc_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance 
}} \ 225 | {%- if scale_out %} 226 | meta master-max="1" clone-node-max="1" interleave="true" 227 | {%- else %} 228 | meta clone-max="2" clone-node-max="1" interleave="true" 229 | {%- endif %} 230 | 231 | order ord_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }} Optional: cln_SAPHanaTopology_{{ sid }}_HDB{{ instance }} msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }} 232 | 233 | ####################################### 234 | # non-production HANA - Cost optimized 235 | ####################################### 236 | 237 | {%- if data.cost_optimized_parameters is defined %} 238 | {%- set qas_sid = data.cost_optimized_parameters.sid.upper() %} 239 | {%- set qas_instance = '{:0>2}'.format(data.cost_optimized_parameters.instance) %} 240 | {%- set qas_remote_host = data.cost_optimized_parameters.remote_host %} 241 | 242 | primitive rsc_SAP_{{ qas_sid }}_HDB{{ qas_instance }} ocf:heartbeat:SAPDatabase \ 243 | params DBTYPE="HDB" SID="{{ qas_sid }}" \ 244 | MONITOR_SERVICES="hdbindexserver|hdbnameserver" \ 245 | op start interval="0" timeout="600" \ 246 | op monitor interval="120" timeout="700" \ 247 | op stop interval="0" timeout="300" \ 248 | meta priority="100" 249 | 250 | location loc_{{ qas_sid }}_never_on_{{ qas_remote_host }} rsc_SAP_{{ qas_sid }}_HDB{{ qas_instance }} -inf: {{ qas_remote_host }} 251 | 252 | colocation col_{{ qas_sid }}_never_with_{{ sid }}-ip -inf: rsc_SAP_{{ qas_sid }}_HDB{{ qas_instance }}:Started \ 253 | rsc_ip_{{ sid }}_HDB{{ instance }} 254 | 255 | order ord_{{ qas_sid }}_stop_before_{{ sid }}-promote Mandatory: rsc_SAP_{{ qas_sid }}_HDB{{ qas_instance }}:stop \ 256 | msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }}:promote 257 | 258 | {%- endif %} 259 | 260 | ###################################### 261 | # prometheus-hanadb_exporter resource 262 | ###################################### 263 | 264 | {%- if monitoring_enabled %} 265 | 266 | primitive rsc_exporter_{{ sid }}_HDB{{ instance }} systemd:prometheus-hanadb_exporter@{{ sid }}_HDB{{ instance }} \ 267 | op start interval=0 timeout=100 \ 268 | op stop interval=0 timeout=100 \ 269 | op monitor interval=10 \ 270 | meta resource-stickiness=0 \ 271 | meta target-role=Started 272 | 273 | colocation col_exporter_{{ sid }}_HDB{{ instance }} -inf: rsc_exporter_{{ sid }}_HDB{{ instance }}:Started msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }}:Slave 274 | 275 | {%- endif %} 276 | 277 | {%- if majority_maker != "" and majority_maker != None and majority_maker != "None" %} 278 | ###################################### 279 | # majority maker 280 | ###################################### 281 | location {{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }}_not_on_majority_maker msl_{{ SAPHanaResource }}_{{ sid }}_HDB{{ instance }} -inf: {{ majority_maker }} 282 | location SAPHanaTopology_{{ sid }}_HDB{{ instance }}_not_on_majority_maker cln_SAPHanaTopology_{{ sid }}_HDB{{ instance }} -inf: {{ majority_maker }} 283 | {%- if monitoring_enabled %} 284 | location exporter_{{ sid }}_HDB{{ instance }}_not_on_majority_maker rsc_exporter_{{ sid }}_HDB{{ instance }} -inf: {{ majority_maker }} 285 | {%- endif %} 286 | {%- endif %} 287 | -------------------------------------------------------------------------------- /hana/templates/ha_cluster_sudoers.j2: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {%- if hana.scale_out -%} 3 | # SAPHanaSR-ScaleOut needs for {{ sr_hook_multi_target }} 4 | Cmnd_Alias GSH_QUERY = 
/usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_gsh -G 5 | Cmnd_Alias GSH_UPDATE = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_gsh -v {{ sr_hook_string.split('"')[1]|default("1.0") }} -l reboot 6 | # be compatible with non-multi-target mode 7 | Cmnd_Alias SOK_GLOB = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_glob_srHook -v SOK -t crm_config -s SAPHanaSR 8 | Cmnd_Alias SFAIL_GLOB = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_glob_srHook -v SFAIL -t crm_config -s SAPHanaSR 9 | # be compatible with multi-target mode 10 | Cmnd_Alias SOK_GLOB_MTS = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_glob_mts -v SOK -t crm_config -s SAPHanaSR 11 | Cmnd_Alias SFAIL_GLOB_MTS = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_glob_mts -v SFAIL -t crm_config -s SAPHanaSR 12 | Cmnd_Alias SOK_SITEA = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['a'] }} -v SOK -t crm_config -s SAPHanaSR 13 | Cmnd_Alias SFAIL_SITEA = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['a'] }} -v SFAIL -t crm_config -s SAPHanaSR 14 | Cmnd_Alias SOK_SITEB = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['b'] }} -v SOK -t crm_config -s SAPHanaSR 15 | Cmnd_Alias SFAIL_SITEB = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['b'] }} -v SFAIL -t crm_config -s SAPHanaSR 16 | {{ sid.lower() }}adm ALL=(ALL) NOPASSWD: GSH_QUERY, GSH_UPDATE, SOK_GLOB, SFAIL_GLOB, SOK_GLOB_MTS, SFAIL_GLOB_MTS, SOK_SITEA, SFAIL_SITEA, SOK_SITEB, SFAIL_SITEB 17 | {%- else %} 18 | # SAPHanaSR needs for {{ sr_hook }} 19 | Cmnd_Alias SOK_SITEA = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['a'] }} -v SOK -t crm_config -s SAPHanaSR 20 | Cmnd_Alias SFAIL_SITEA = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['a'] }} -v SFAIL -t crm_config -s SAPHanaSR 21 | Cmnd_Alias SOK_SITEB = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['b'] }} -v SOK -t crm_config -s SAPHanaSR 22 | Cmnd_Alias SFAIL_SITEB = /usr/sbin/crm_attribute -n hana_{{ sid.lower() }}_site_srHook_{{ sites['b'] }} -v SFAIL -t crm_config -s SAPHanaSR 23 | {{ sid.lower() }}adm ALL=(ALL) NOPASSWD: SOK_SITEA, SFAIL_SITEA, SOK_SITEB, SFAIL_SITEB 24 | {%- endif %} 25 | # SAPHanaSR takeover blocker needs for {{ sustkover_hook }} 26 | Cmnd_Alias HOOK_HELPER_TKOVER = /usr/sbin/SAPHanaSR-hookHelper --case checkTakeover --sid={{ sid.lower() }} 27 | {{ sid.lower() }}adm ALL=(ALL) NOPASSWD: HOOK_HELPER_TKOVER 28 | -------------------------------------------------------------------------------- /hana/templates/hanadb_exporter.j2: -------------------------------------------------------------------------------- 1 | {# context is coming from monitoring.sls #} 2 | { 3 | "exposition_port": {{ exporter.exposition_port|default(9668) }}, 4 | "multi_tenant": {{ exporter.multi_tenant|default(true)|tojson }}, 5 | "timeout": {{exporter.timeout|default(30)}}, 6 | "hana": { 7 | "host": "{{ grains['host'] }}", 8 | {%- if exporter.multi_tenant|default(true) and exporter.port is not defined %} 9 | "port": 3{{ sap_instance_nr }}13, 10 | {%- else %} 11 | "port": {{ exporter.port }}, 12 | {%- endif %} 13 | "user": "{{ exporter.user }}", 14 | "password": "{{ exporter.password }}" 15 | }, 16 | "logging": { 17 | "config_file": "/usr/etc/hanadb_exporter/logging_config.ini", 18 | "log_file": "/var/log/hanadb_exporter_{{ exporter_instance }}.log" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- 
/hana/templates/srCostOptMemConfig_hook.j2: -------------------------------------------------------------------------------- 1 | {%- from "hana/map.jinja" import hana with context -%} 2 | {%- set host = grains['host'] %} 3 | 4 | {%- for node in hana.nodes %} 5 | {%- if node.host == host and node.secondary is defined and node.scenario_type.lower() == 'cost-optimized' %} 6 | 7 | {%- set dbsid = node.sid.lower() %} 8 | {%- set dbinst = '{:0>2}'.format(node.instance) %} 9 | 10 | {%- for prim_node in hana.nodes %} 11 | {%- if node.secondary.remote_host == prim_node.host and prim_node.primary is defined %} 12 | 13 | {%- set dbuser = prim_node.primary.userkey.user_name %} 14 | {%- set dbpwd = prim_node.password %} 15 | {%- set dbversion = salt['hana.get_version'](sid=dbsid, inst=dbinst, password=dbpwd) %} 16 | {%- if salt['pkg.version_cmp'](dbversion, '2.0') < 0 %} 17 | {%- set dbport = ('3'~ dbinst ~'15')|int %} 18 | {%- else %} 19 | {%- set dbport = ('3'~ dbinst ~'13')|int %} 20 | {%- endif -%} 21 | 22 | """ 23 | HA/DR hook {haDrCostOptMem} for method srPostTakeover() 24 | 25 | This hook is used when deploying a "Cost Optimized Scenario". 26 | It makes sure to reconfigure the primary database after a takeover. 27 | 28 | The following changes to global.ini are needed to activate this hook. 29 | 30 | [ha_dr_provider_{haDrCostOptMem}] 31 | provider = {haDrCostOptMem} 32 | path = /hana/shared/srHook/ 33 | execution_order = 2 34 | 35 | For all hooks, 0 must be returned in case of success. 36 | 37 | Set the following variables: 38 | * dbinst Instance Number [e.g. 00 - 99 ] 39 | * dbuser Username [ e.g. SYSTEM ] 40 | * dbpwd 41 | user password [ e.g. SLES4sap ] 42 | * dbport port where db listens for SQL connections [e.g. 30013 or 30015] 43 | """ 44 | # 45 | # parameter section 46 | # 47 | dbuser="{{ dbuser }}" 48 | dbpwd="{{ dbpwd }}" 49 | dbinst="{{ dbinst }}" 50 | dbport="{{ dbport }}" 51 | 52 | {%- endif %} 53 | {%- endfor %} 54 | {%- endif %} 55 | {%- endfor %} 56 | 57 | # 58 | # prepared SQL statements to remove memory allocation limit 59 | # and pre-load of column tables 60 | # 61 | stmnt1 = "ALTER SYSTEM ALTER CONFIGURATION ('global.ini','SYSTEM') UNSET ('memorymanager','global_allocation_limit') WITH RECONFIGURE" 62 | stmnt2 = "ALTER SYSTEM ALTER CONFIGURATION ('global.ini','SYSTEM') UNSET ('system_replication','preload_column_tables') WITH RECONFIGURE" 63 | # 64 | # loading classes and libraries 65 | # 66 | import os, time 67 | from hdbcli import dbapi 68 | from hdb_ha_dr.client import HADRBase, Helper 69 | # 70 | # class definition srCostOptMemConfig 71 | # 72 | class srCostOptMemConfig(HADRBase): 73 | def __init__(self, *args, **kwargs): 74 | # delegate construction to base class 75 | super(srCostOptMemConfig, self).__init__(*args, **kwargs) 76 | 77 | def about(self): 78 | return {"provider_company" : "", 79 | "provider_name" : "srCostOptMemConfig", # provider name = class name 80 | "provider_description" : "Replication takeover script to set parameters to default.", 81 | "provider_version" : "1.0"} 82 | 83 | def postTakeover(self, rc, **kwargs): 84 | """Post takeover hook.""" 85 | self.tracer.info("%s.postTakeover method called with rc=%s" % (self.__class__.__name__, rc)) 86 | if rc == 0: 87 | # normal takeover succeeded 88 | conn = dbapi.connect('localhost',dbport,dbuser,dbpwd) 89 | cursor = conn.cursor() 90 | cursor.execute(stmnt1) 91 | cursor.execute(stmnt2) 92 | return 0 93 | elif rc == 1: 94 | # waiting for force takeover 95 | conn = dbapi.connect('localhost',dbport,dbuser,dbpwd) 96
| cursor = conn.cursor() 97 | cursor.execute(stmnt1) 98 | cursor.execute(stmnt2) 99 | return 0 100 | elif rc == 2: 101 | # error, something went wrong 102 | return 0 103 | -------------------------------------------------------------------------------- /metadata.yml: -------------------------------------------------------------------------------- 1 | --- 2 | description: 3 | SAP HANA deployment formula 4 | group: 5 | SAP deployment 6 | -------------------------------------------------------------------------------- /pillar.example: -------------------------------------------------------------------------------- 1 | hana: 2 | # optional: Install required rpm packages to install SAP HANA (true by default), 3 | # e.g. SAP Notes 2892356 and 3029056. 4 | # This will be taken care of by `patterns-sap-hana` in SLES4SAP environments. 5 | # If set to false, these packages must be installed manually before the formula 6 | # execution. 7 | # install_packages: true 8 | 9 | # saptune solution to apply to all nodes (by default nothing is applied) 10 | # you can also set this on a single node if it needs to differ, see hana02 below 11 | # Warning: only a single solution can be applied to a node. 12 | saptune_solution: 'HANA' 13 | 14 | # HANA installation media can be provided in one of two ways: an extracted HANA platform folder or a HANA media archive 15 | # 1. Path to an already extracted HANA platform installation media. This will have preference over the hana installation media archive 16 | software_path: '/sapmedia/HANA/51052481' 17 | # 2. Or specify the path to the hana installation media archive 18 | # If using a hana sar archive, please also provide a compatible version of the sapcar executable 19 | # The archive will be extracted to the path specified at hana_extract_dir (optional, by default /sapmedia_extract/HANA) 20 | # hana_extract_dir should be a new directory, separated from the location where the compressed files are present, to avoid conflicts in file permissions. 21 | hana_archive_file: '/sapmedia/51053492.ZIP' 22 | hana_extract_dir: '/sapmedia_extract/HANA' 23 | 24 | # HANA Client packages are needed for monitoring & the cost-optimized scenario. The HANA Client is already included in the HANA platform media unless a HANA database sar archive is used 25 | # If the HANA archive specified above is in sar format, you need to provide the HANA Client in one of two ways: an extracted HANA client folder or a HANA client sar archive file 26 | # If any of the next two options are used, this HANA Client will have preference over the HANA Client coming with the HANA platform described above. If the HANA platform is used, it is usually better 27 | # not to use a separate HANA Client as it might bring compatibility issues. 28 | # 1. Path to already extracted hana client folder 29 | #hana_client_software_path: '/sapmedia/IMDB_CLIENT' 30 | # 2. Or specify the path to the hana client sar archive file. It will be extracted to hana_client_extract_dir path (optional, by default /sapmedia_extract/HANA_CLIENT) 31 | hana_client_archive_file: '/sapmedia/IMDB_CLIENT20_003_144-80002090.SAR' 32 | hana_client_extract_dir: '/sapmedia_extract/HANA_CLIENT' 33 | 34 | # If using a sar archive for the hana platform or hana client media, please provide a compatible version of the sapcar executable 35 | #sapcar_exe_file: '/sapmedia/SAPCAR' 36 | 37 | # Enable HA cluster configuration. It installs the SAPHanaSR hook.
38 | # To use this option the primary and secondary nodes must be defined in the pillar file 39 | ha_enabled: true 40 | 41 | # optional: enables monitoring via hanadb_exporter (disabled by default) 42 | # the exporter will be installed and configured in all the nodes 43 | # you can customize the exporter configuration in each node in the dedicated sections below 44 | monitoring_enabled: true 45 | 46 | # HANA HA/DR provider configuration 47 | # See https://documentation.suse.com/sbp/all/single-html/SLES4SAP-hana-sr-guide-PerfOpt-15/#cha.s4s.hana-hook for details. 48 | # The SAPHanaSR hook is always enabled. 49 | # enable susTkOver hook (disabled by default) 50 | #ha_dr_sustkover_enabled: true 51 | # enable susChkSrv hook (disabled by default) 52 | #ha_dr_suschksrv_enabled: true 53 | # susChkSrv action on lost, see `man 7 susChkSrv.py` (Options: stop [default], fence) 54 | #ha_dr_suschksrv_action_on_lost: 'fence' 55 | 56 | nodes: 57 | - host: 'hana01' 58 | sid: 'prd' 59 | instance: 00 60 | password: 'Qwerty1234' 61 | install: 62 | # Specify the path to local installation media here, otherwise global variable software_path will be used for installation media. 63 | # If both of these paths are not set, hana_extract_dir path will be used for installation media, 64 | # given that hana_archive_file package is also provided 65 | #software_path: '/sapmedia/HANA/51052481' 66 | root_user: 'root' 67 | root_password: 's' 68 | # Fetch HANA passwords from XML file 69 | hdb_pwd_file: 'salt://passwords.xml' 70 | # Or specify HANA system & sapadm users' passwords like below 71 | system_user_password: 'Qwerty1234' 72 | sapadm_password: 'Qwerty1234' 73 | # You can also provide additional hana configuration parameters as done in the following example 74 | # See https://help.sap.com/viewer/2c1988d620e04368aa4103bf26f17727/2.0.00/en-US/c16432a77b6144dcb75aace2b4fcacff.html 75 | # for details on all the supported parameters 76 | extra_parameters: 77 | # As an example, this parameter allows to ignore some prerequisite tests 78 | ignore: check_min_mem 79 | # Primary SAP HANA System Replication site 80 | primary: 81 | name: NUREMBERG 82 | userkey: 83 | key_name: 'backupkey' 84 | environment: 'hana01:30013' 85 | user_name: 'SYSTEM' 86 | user_password: 'Qwerty1234' 87 | database: 'SYSTEMDB' 88 | # Take a backup on the primary SAP HANA System Replication site (prerequisite for SR) 89 | backup: 90 | # Set key_name or user_name/user_password combination 91 | key_name: 'backupkey' 92 | # Or 93 | user_name: 'SYSTEM' 94 | user_password: 'Qwerty1234' 95 | database: 'SYSTEMDB' 96 | file: 'backup' 97 | # hanadb_exporter configuration 98 | # only applies when monitoring_enabled is true 99 | exporter: 100 | exposition_port: 9668 # Optional, 9668 by default 101 | multi_tenant: true # Enable the exporter as multi tenant. This will create the connection with the SYSTEMDB database and all tenants 102 | user: 'SYSTEM' 103 | password: 'Qwerty1234' 104 | #port: 30015 # HANA database port. 
If multi_tenant is set this value is 3XX13 by default where XX is the instance number 105 | timeout: 30 # Timeout in seconds to start the connection with HANA database 106 | 107 | - host: 'hana02' 108 | sid: 'prd' 109 | instance: 00 110 | password: 'Qwerty1234' 111 | saptune_solution: 'MAXDB' 112 | install: 113 | software_path: '/sapmedia/HANA/51052481' 114 | root_user: 'root' 115 | root_password: 's' 116 | system_user_password: 'Qwerty1234' 117 | sapadm_password: 'Qwerty1234' 118 | # Secondary SAP HANA System Replication site 119 | secondary: 120 | name: PRAGUE 121 | remote_host: 'hana01' 122 | remote_instance: '00' 123 | replication_mode: 'sync' 124 | operation_mode: 'logreplay' 125 | # For Active/Active HANA setup 126 | #operation_mode: 'logreplay_readaccess' 127 | # If the primary node is not defined, the password can be set here (the primary node password has preference) 128 | #primary_password: 'Qwerty1234' 129 | # Optional timeout value in seconds to wait until the primary node is enabled 130 | # 100 seconds by default 131 | primary_timeout: 100 132 | scenario_type: 'cost-optimized' 133 | cost_optimized_parameters: 134 | global_allocation_limit: '32100' 135 | preload_column_tables: False 136 | 137 | - host: hana02 138 | sid: 'qas' 139 | instance: 01 140 | password: 'Qwerty1234' 141 | install: 142 | software_path: '/sapmedia/HANA/51052481' 143 | root_user: 'root' 144 | root_password: 's' 145 | system_user_password: 'Qwerty1234' 146 | sapadm_password: 'Qwerty1234' 147 | exporter: 148 | exposition_port: 9669 # Optional, 9668 by default 149 | user: 'SYSTEM' 150 | password: 'Qwerty1234' 151 | -------------------------------------------------------------------------------- /saphanabootstrap-formula.changes: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------- 2 | Mon Nov 6 08:37:35 UTC 2023 - Eike Waldt 3 | 4 | - Version bump 0.14.0 5 | * add susChkSrv HA/DR provider 6 | * make providers configurable 7 | 8 | ------------------------------------------------------------------- 9 | Mon Nov 7 08:59:19 UTC 2022 - Steven Stringer 10 | 11 | - Version bump 0.13.1 12 | * revert changes to spec file to re-enable SLES RPM builds 13 | 14 | ------------------------------------------------------------------- 15 | Mon Aug 15 08:29:52 UTC 2022 - Eike Waldt 16 | 17 | - Version bump 0.13.0 18 | * pass sid to sudoers in a SLES12 compatible way 19 | * add location constraint to gcp_stonith 20 | 21 | ------------------------------------------------------------------- 22 | Thu Aug 11 09:19:38 UTC 2022 - Christian Schneemann 23 | 24 | - Version bump 0.12.1 25 | * moved templates dir into hana dir in repository to be gitfs compatible 26 | 27 | ------------------------------------------------------------------- 28 | Thu Jul 28 10:03:12 UTC 2022 - Eike Waldt 29 | 30 | - Version bump 0.12.0 31 | * add SAPHanaSR takeover blocker 32 | 33 | ------------------------------------------------------------------- 34 | Thu Jul 19 11:47:52 UTC 2022 - Eike Waldt 35 | 36 | - Version bump 0.11.0 37 | * use check_cmd instead of tmp sudoers file 38 | * make sudoers rules more secure 39 | * migrate sudoers to template file 40 | 41 | ------------------------------------------------------------------- 42 | Thu Jun 06 18:31:32 UTC 2022 - Eike Waldt 43 | 44 | - Version bump 0.10.1 45 | * fix hook removal conditions 46 | * fix majority_maker code on case grain is empty 47 | 48 | ------------------------------------------------------------------- 49 | Thu May
13 08:16:24 UTC 2022 - Eike Waldt 50 | 51 | - Version bump 0.10.0 52 | * allow to disable shared HANA basepath and rework add_hosts code 53 | (enables HANA scale-out on AWS) 54 | * do not edit global.ini directly (if not needed) 55 | 56 | ------------------------------------------------------------------- 57 | 58 | Thu May 05 20:18:52 UTC 2022 - Eike Waldt 59 | 60 | - Version bump 0.9.1 61 | * fix majority_maker code on case grain is empty 62 | 63 | ------------------------------------------------------------------- 64 | Thu Nov 17 13:52:34 UTC 2021 - Eike Waldt 65 | 66 | - Version bump 0.9.0 67 | * define vip_mechanism for every provider and reorder resources 68 | (same schema for all SAP related formulas) 69 | 70 | ------------------------------------------------------------------- 71 | Thu Nov 11 15:08:11 UTC 2021 - Eike Waldt 72 | 73 | - Version bump 0.8.1 74 | * use multi-target Hook on HANA scale-out 75 | 76 | ------------------------------------------------------------------- 77 | Thu Jul 29 13:21:45 UTC 2021 - Eike Waldt 78 | 79 | - Version bump 0.8.0 80 | * add HANA scale-out support 81 | * add idempotence to not affect a running HANA and cluster 82 | 83 | ------------------------------------------------------------------- 84 | Thu Jun 17 13:56:06 UTC 2021 - Eike Waldt 85 | 86 | - Version bump 0.7.2 87 | * add native fencing for microsoft-azure 88 | 89 | ------------------------------------------------------------------- 90 | Wed Jun 9 07:00:36 UTC 2021 - Eike Waldt 91 | 92 | - fixes a not working import of dbapi in SUSE/ha-sap-terraform-deployments#703 93 | - removes the installation and extraction of all hdbcli files in the /hana/shared/srHook directory 94 | - fixes execution order of srTakeover/srCostOptMemConfig hook 95 | - renames and updates hook srTakeover to srCostOptMemConfig 96 | 97 | ------------------------------------------------------------------- 98 | Fri May 7 09:15:21 UTC 2021 - Bernd Schubert 99 | 100 | - Changing exporter stickiness to => 0 and adjusting the colocation 101 | score from +inf to -inf and changing the colocation from Master to Slave. 102 | This change fix the impact of a failed exporter in regards to the HANA DB. 
103 | 104 | ------------------------------------------------------------------- 105 | Thu May 6 16:19:45 UTC 2021 - Rubén Torrero Marijnissen 106 | 107 | - Document extra_parameters in pillar.example (bsc#1185643) 108 | 109 | ------------------------------------------------------------------- 110 | Thu May 6 08:49:19 UTC 2021 - Xabier Arbulu 111 | 112 | - Change hanadb_exporter default timeout value to 30 seconds 113 | 114 | ------------------------------------------------------------------- 115 | Wed Apr 28 14:26:18 UTC 2021 - Diego Akechi 116 | 117 | - Set correct stickiness for the azure-lb resource 118 | The azure-lb resource receives an stickiness=0 to not influence on 119 | transitions calculations as the HANA resources have more priority 120 | 121 | ------------------------------------------------------------------- 122 | Thu Mar 18 07:41:18 UTC 2021 - Xabier Arbulu 123 | 124 | - Version bump 0.7.1 125 | - Implement the load balancer use case for GCP in the cluster resource 126 | agents template 127 | 128 | ------------------------------------------------------------------- 129 | Fri Mar 12 15:39:31 UTC 2021 - Xabier Arbulu 130 | 131 | - Fix the HANA sidadm usage to transform to lowercase some states 132 | managing the sudoers file in ha_cluster.sls state file 133 | (bsc#1185090) 134 | 135 | ------------------------------------------------------------------- 136 | Fri Mar 5 15:30:40 UTC 2021 - Xabier Arbulu 137 | 138 | - Fix how the HANA client options are handled. Now, if any of the 139 | HANA client option variables are set, they will have preference 140 | over the database option (the platform and extracted options) 141 | 142 | ------------------------------------------------------------------- 143 | Tue Jan 19 13:35:18 UTC 2021 - Xabier Arbulu 144 | 145 | - Version bump 0.7.0 146 | * Change salt-formulas-configuration requirement in SLE12 codestream 147 | to a recommendation 148 | (bsc#1177860) 149 | 150 | ------------------------------------------------------------------- 151 | Wed Dec 2 17:48:05 UTC 2020 - Simranpal Singh 152 | 153 | - Start the saptune daemon service 154 | 155 | ------------------------------------------------------------------- 156 | Tue Nov 24 04:08:55 UTC 2020 - Simranpal Singh 157 | 158 | - Add requisite of hana installation to subsequent salt states 159 | 160 | ------------------------------------------------------------------- 161 | Wed Nov 11 04:25:03 UTC 2020 - Simranpal Singh 162 | 163 | - Add support to extract and install HANA Client sar packages 164 | 165 | ------------------------------------------------------------------- 166 | Mon Oct 19 14:48:50 UTC 2020 - Xabier Arbulu 167 | 168 | - Set the native fence mechanism usage for CSP as optional 169 | 170 | ------------------------------------------------------------------- 171 | Tue Oct 13 15:50:43 UTC 2020 - Simranpal Singh 172 | 173 | - Version 0.6.2: 174 | * Update the package version after SUMA form update and extraction logic update 175 | (jsc#SLE-4047) 176 | 177 | ------------------------------------------------------------------- 178 | Sun Oct 11 04:21:32 UTC 2020 - Simranpal Singh 179 | 180 | - Fix the hana media extraction and installation logics when using exe archives 181 | 182 | ------------------------------------------------------------------- 183 | Thu Oct 8 02:34:37 UTC 2020 - Simranpal Singh 184 | 185 | - Update the SUMA hana form metadata, to show hana form under SAP deployment group 186 | 187 | ------------------------------------------------------------------- 188 | Sat Oct 3 
04:59:38 UTC 2020 - Simranpal Singh 189 | 190 | - Update SUMA form.yml file and prevalidation state with latest changes in formula 191 | 192 | ------------------------------------------------------------------- 193 | Thu Sep 24 20:23:13 UTC 2020 - Simranpal Singh 194 | 195 | - Change the default 'hana_extract_dir' hana media extraction location 196 | 197 | ------------------------------------------------------------------- 198 | Tue Sep 22 13:38:47 UTC 2020 - Dario Maiocchi 199 | 200 | - Version 0.6.1: 201 | * Remove copy of config files for exporters since we use /usr/etc 202 | 203 | ------------------------------------------------------------------- 204 | Thu Aug 20 02:00:46 UTC 2020 - Simranpal Singh 205 | 206 | - Version 0.6.0 207 | * Include pillar example file in package 208 | (bsc#1174994, jsc#SLE-4047) 209 | 210 | ------------------------------------------------------------------- 211 | Wed Jul 15 10:30:22 UTC 2020 - Xabier Arbulu 212 | 213 | - Add hana active/active resources to the cluster template 214 | - Change `route_table` by `route_name` to make the variable usage 215 | more meaningful 216 | 217 | ------------------------------------------------------------------- 218 | Wed Jun 10 01:49:02 UTC 2020 - Simranpal Singh 219 | 220 | - Add support to extract zip,rar,exe,sar hana media 221 | - This change brings non backward compatible changes. The variable 222 | hdbserver_extract_dir is replaced by hana_extract_dir 223 | 224 | ------------------------------------------------------------------- 225 | Fri Jun 5 14:42:12 UTC 2020 - Stefano Torresi 226 | 227 | - Fix provisioning of hanadb_exporter in SLE12, where python3-pip must be always installed. 228 | 229 | ------------------------------------------------------------------- 230 | Tue Jun 2 00:21:55 UTC 2020 - Simranpal Singh 231 | 232 | - Version 0.5.10 233 | * Change colocation weight for col_saphana_ip for Azure provider \ 234 | 235 | (jsc#ECO-1965, jsc#SLE-4047) 236 | 237 | ------------------------------------------------------------------- 238 | Fri May 15 08:19:31 UTC 2020 - Stefano Torresi 239 | 240 | - Implement exporter Pacemaker resource, refactor monitoring settings 241 | 242 | ------------------------------------------------------------------- 243 | Fri May 15 01:55:38 UTC 2020 - Simranpal Singh 244 | 245 | - Version 0.5.9 246 | * Update extraction functionality for cost optimized scenario 247 | 248 | ------------------------------------------------------------------- 249 | Tue Apr 21 02:21:28 UTC 2020 - Simranpal Singh 250 | 251 | - Version 0.5.8 252 | * Adjust software_path usage for cost optimized scenario 253 | 254 | ------------------------------------------------------------------- 255 | Fri Apr 17 08:14:08 UTC 2020 - Xabier Arbulu 256 | 257 | - Version 0.5.7 258 | * Use gcp instance name instead of id in fence_gce agent 259 | (bsc#1161898, bsc#1160933) 260 | 261 | ------------------------------------------------------------------- 262 | Tue Apr 14 13:28:03 UTC 2020 - Xabier Arbulu 263 | 264 | - Version 0.5.6 265 | * Update anything socat resource by azure-lb as recommended in 266 | the updated best practices guide 267 | 268 | ------------------------------------------------------------------- 269 | Tue Mar 31 07:25:29 UTC 2020 - Xabier Arbulu 270 | 271 | - Version 0.5.5 272 | * Implement the new SAPHanaSR hook usage 273 | 274 | ------------------------------------------------------------------- 275 | Fri Mar 27 11:16:16 UTC 2020 - Xabier Arbulu 276 | 277 | - Version 0.5.4 278 | * Update the fence_gce usage to 
use gcp_instance_id 279 | 280 | ------------------------------------------------------------------- 281 | Wed Mar 25 21:34:17 UTC 2020 - Simranpal Singh 282 | 283 | - Version 0.5.3 284 | * Add support to run sapcar and extract HANA sar package 285 | 286 | ------------------------------------------------------------------- 287 | Fri Mar 20 13:19:03 UTC 2020 - Xabier Arbulu 288 | 289 | - Version 0.5.2 290 | * Install pydbapi package to be used by hanadb_exporter 291 | 292 | ------------------------------------------------------------------- 293 | Tue Mar 3 09:51:59 UTC 2020 - Xabier Arbulu 294 | 295 | - Version 0.5.1 296 | * Adapt the cluster template to use the proper gcp agents 297 | (bsc#1161898, bsc#1160933) 298 | 299 | ------------------------------------------------------------------- 300 | Tue Feb 11 10:51:56 UTC 2020 - Dario Maiocchi 301 | 302 | - Version 0.5.0 303 | * Change the package name to prometheus-hanadb_exporter (jsc#SLE-10545) 304 | (bsc#1165156, jsc#SLE-4143, boo#1137989, jsc#SLE-10545) 305 | 306 | ------------------------------------------------------------------- 307 | Thu Jan 30 15:31:06 UTC 2020 - Xabier Arbulu 308 | 309 | - Version 0.4.4 310 | * Update HANA resources template to add AWS specific RA 311 | 312 | ------------------------------------------------------------------- 313 | Tue Jan 21 14:41:29 UTC 2020 - Dario Maiocchi 314 | 315 | - Version 0.4.3 316 | * Add saptune module to apply a specific solution 317 | 318 | ------------------------------------------------------------------- 319 | Thu Jan 2 23:25:35 UTC 2020 - Simranpal Singh 320 | 321 | - Version 0.4.2 322 | * Add the option to specify password XML file for installing hana 323 | 324 | ------------------------------------------------------------------- 325 | Thu Dec 19 12:18:22 UTC 2019 - Xabier Arbulu 326 | 327 | - Version 0.4.1 328 | * Update cloud provider usage 329 | 330 | ------------------------------------------------------------------- 331 | Thu Dec 12 11:52:23 UTC 2019 - Xabier Arbulu 332 | 333 | - Version bump 0.4.0 334 | * Update exporter state to use the new hanadb_exporter config 335 | folder 336 | * Clean defaults.yaml file content 337 | * Fix the scenario where the primary was not defined during 338 | secondary installation. 
Now the password can be retrieved using 339 | other options 340 | * Update the scale up template to remove the default value for nic 341 | parameter in the IPaddr2 resource if the provider is not gcp 342 | 343 | ------------------------------------------------------------------- 344 | Thu Nov 28 21:28:19 UTC 2019 - Simranpal Singh 345 | 346 | - Version bump 0.3.3 347 | * Update Cluster template command to use `socat` for Azure 348 | 349 | ------------------------------------------------------------------- 350 | Wed Oct 30 16:14:32 UTC 2019 - Xabier Arbulu 351 | 352 | - Version bump 0.3.2 353 | * Fix issue with file permissions during package installation in 354 | /usr/share/salt-formulas (0755, root, salt) 355 | (boo#1142306) 356 | 357 | ------------------------------------------------------------------- 358 | Tue Oct 29 16:31:16 UTC 2019 - Xabier Arbulu 359 | 360 | - Version bump 0.3.1 361 | * Fix 'true' rendering in jinja to convert to json correctly 362 | 363 | ------------------------------------------------------------------- 364 | Fri Oct 25 07:22:15 UTC 2019 - Xabier Arbulu 365 | 366 | - Version bump 0.3.0 367 | * Add support for multi tenant hanadb_exporter 368 | 369 | ------------------------------------------------------------------- 370 | Wed Sep 4 06:55:01 UTC 2019 - Xabier Arbulu Insausti 371 | 372 | - Update the pkg.info_available call to avoid repositories refresh as 373 | it may cause errors 374 | 375 | ------------------------------------------------------------------- 376 | Wed Aug 7 07:46:25 UTC 2019 - Simranpal Singh 377 | 378 | - Version bump 0.2.9 379 | * Fix srHook script usage for cost optimized scenario 380 | * Add scenario type options to the form.yml file 381 | (boo#1137989) 382 | 383 | ------------------------------------------------------------------- 384 | Wed Jul 31 12:26:05 UTC 2019 - Xabier Arbulu Insausti 385 | 386 | - Fix errors in the form.yml file to match with the formula names 387 | - Fix some styling issues 388 | 389 | ------------------------------------------------------------------- 390 | Tue Jul 23 11:17:36 UTC 2019 - Xabier Arbulu Insausti 391 | 392 | - Version bump 0.2.8 393 | * Add support for Power machines 394 | * Fix issues with SAP HANA deployment template and the exporter 395 | 396 | ------------------------------------------------------------------- 397 | Mon Jul 22 08:26:16 UTC 2019 - Xabier Arbulu Insausti 398 | 399 | - Version bump 0.2.7 400 | * Fix issue with file permissions during package installation in 401 | /usr/share/salt-formulas 402 | (boo#1142306) 403 | 404 | ------------------------------------------------------------------- 405 | Mon Jul 8 11:49:55 UTC 2019 - Dario Maiocchi 406 | 407 | - Version bump 0.2.6 408 | * Retry pkg.install multiple times, in case a pkg installation fails 409 | for having a more resilient installation. 410 | 411 | ------------------------------------------------------------------- 412 | Wed Jul 3 07:58:55 UTC 2019 - Xabier Arbulu Insausti 413 | 414 | - Version bump 0.2.5 415 | * hanadb_exporter executed as a daemon 416 | * hanadb_exporter installation suggested 417 | 418 | ------------------------------------------------------------------- 419 | Tue Jul 2 11:14:50 UTC 2019 - Diego Vinicius Akechi 420 | 421 | - Version bump 0.2.4 422 | * Change the salt-formula directories permissions to 0750 to avoid 423 | conflicts with the package salt-standalone-formulas-configuration. 
424 | 425 | * Correct the required package name to 426 | salt-standalone-formulas-configuration 427 | 428 | ------------------------------------------------------------------- 429 | Mon Jul 1 11:27:50 UTC 2019 - Xabier Arbulu Insausti 430 | 431 | - Create package version 0.2.3 supporting hanadb_exporter logging system 432 | 433 | ------------------------------------------------------------------- 434 | Wed Jun 19 14:23:59 UTC 2019 - Xabier Arbulu Insausti 435 | 436 | - Create package version 0.2.2 adding hanadb_exporter deployment 437 | 438 | ------------------------------------------------------------------- 439 | Tue Jun 11 11:42:31 UTC 2019 - Xabier Arbulu Insausti 440 | 441 | - Create package version 0.2.1 with fixed spec files. Now the package 442 | is available in all SLE12 and SLE15 versions (boo#1137989, jsc#SLE-4143) 443 | 444 | ------------------------------------------------------------------- 445 | Thu Jun 6 07:28:43 UTC 2019 - Xabier Arbulu Insausti 446 | 447 | - Fix how qas instance number is created in scale_up_resources template 448 | 449 | ------------------------------------------------------------------- 450 | Fri May 21 12:37:43 UTC 2019 - Diego Vinicius Akechi 451 | 452 | - Version bump 0.2.0 453 | * Include the salt-formulas-configuration dependency on 454 | SLE/Leap 15-SP1 and higher. This package configures the shared salt 455 | formulas location (/usr/share/salt-formulas) to be used by SUMA 4.0 456 | or salt in standalone mode. 457 | * Drops the saphanabootstrap-formula-suma package, as the forms metadata 458 | will be available only on SUMA 4.0 using the shared location. 459 | 460 | ------------------------------------------------------------------- 461 | Thu May 16 08:52:06 UTC 2019 - Xabier Arbulu Insausti 462 | 463 | - Update formula to work with the latest shaptools code. In this version 464 | the secondary node registration is managed completely in shaptools 465 | 466 | ------------------------------------------------------------------- 467 | Thu Apr 25 12:06:43 UTC 2019 - Diego Vinicius Akechi 468 | 469 | - Changed requires from salt-saphana to salt-shaptools. 470 | 471 | ------------------------------------------------------------------- 472 | Mon Mar 18 08:50:43 UTC 2019 - Xabier Arbulu Insausti 473 | 474 | - Update primary available checking to execute this action before 475 | trying to copy the SSFS files from primary node 476 | - Add configurable timeout to wait to the primary node 477 | 478 | ------------------------------------------------------------------- 479 | Tue Mar 12 07:52:37 UTC 2019 - Xabier Arbulu Insausti 480 | 481 | - Fix OS release comparison to use integer type in order to choose 482 | installable python version 483 | 484 | ------------------------------------------------------------------- 485 | Fri Mar 8 13:47:41 UTC 2019 - Xabier Arbulu Insausti 486 | 487 | - Improve shaptools installation python version management 488 | 489 | ------------------------------------------------------------------- 490 | Mon Mar 4 15:30:24 UTC 2019 - xarbulu@suse.com 491 | 492 | - Improved the use of keystore access. When the key_name is informed, 493 | the user_name/user_password is not needed. 494 | 495 | ------------------------------------------------------------------- 496 | Wed Feb 25 10:15:35 UTC 2019 - dakechi@suse.com 497 | 498 | - Forces lower case SID when composing the OS username 499 | - Typos fixing. 500 | - Adjust the labels to SAP terminology and include help texts. 
501 | 502 | ------------------------------------------------------------------- 503 | Wed Feb 20 08:18:35 UTC 2019 - xarbulu@suse.com 504 | 505 | - Add templates folder with RA configuration templates 506 | 507 | ------------------------------------------------------------------- 508 | Thu Dec 20 08:33:10 UTC 2018 - xarbulu@suse.com 509 | 510 | - First version of the SAP HANA deployment formula 511 | -------------------------------------------------------------------------------- /saphanabootstrap-formula.spec: -------------------------------------------------------------------------------- 1 | # 2 | # spec file for package saphanabootstrap-formula 3 | # 4 | # Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany. 5 | # 6 | # All modifications and additions to the file contributed by third parties 7 | # remain the property of their copyright owners, unless otherwise agreed 8 | # upon. The license for this file, and modifications and additions to the 9 | # file, is the same license as for the pristine package itself (unless the 10 | # license for the pristine package is not an Open Source License, in which 11 | # case the license is the MIT License). An "Open Source License" is a 12 | # license that conforms to the Open Source Definition (Version 1.9) 13 | # published by the Open Source Initiative. 14 | 15 | # Please submit bugfixes or comments via https://bugs.opensuse.org/ 16 | # 17 | 18 | 19 | # See also http://en.opensuse.org/openSUSE:Specfile_guidelines 20 | 21 | Name: saphanabootstrap-formula 22 | Version: 0 23 | Release: 0 24 | Summary: SAP HANA platform deployment formula 25 | License: Apache-2.0 26 | 27 | Url: https://github.com/SUSE/%{name} 28 | Source0: %{name}-%{version}.tar.gz 29 | BuildRoot: %{_tmppath}/%{name}-%{version}-build 30 | BuildArch: noarch 31 | Requires: habootstrap-formula 32 | Requires: salt-shaptools 33 | %if 0%{?suse_version} < 1500 34 | Recommends: salt-formulas-configuration 35 | %else 36 | Requires: salt-formulas-configuration 37 | %endif 38 | Suggests: prometheus-hanadb_exporter >= 0.7.0 39 | 40 | %define fname hana 41 | %define fdir %{_datadir}/salt-formulas 42 | %define ftemplates templates 43 | 44 | %description 45 | SAP HANA deployment salt formula. This formula can install 46 | SAP HANA nodes, enable system replication and configure an SLE-HA cluster 47 | with the SAPHanaSR resource agent, using standalone salt or via SUSE Manager 48 | formulas with forms, available on SUSE Manager 4.0. 49 | 50 | In order to use the formula, salt must be available on the system. The salt package is included by default 51 | in SLE15.
To use it in SLE12, salt (and its sub-components) comes from the Advanced systems management 52 | module, which can be added by running the `SUSEConnect -p sle-module-adv-systems-management/12/{{ arch }}` command. 53 | 54 | %prep 55 | %setup -q 56 | 57 | %build 58 | 59 | %install 60 | 61 | mkdir -p %{buildroot}%{fdir}/states/%{fname} 62 | mkdir -p %{buildroot}%{fdir}/metadata/%{fname} 63 | cp -R %{fname} %{buildroot}%{fdir}/states 64 | cp -R form.yml pillar.example %{buildroot}%{fdir}/metadata/%{fname} 65 | if [ -f metadata.yml ] 66 | then 67 | cp -R metadata.yml %{buildroot}%{fdir}/metadata/%{fname} 68 | fi 69 | 70 | 71 | %files 72 | %defattr(-,root,root,-) 73 | %if 0%{?sle_version} < 120300 74 | %doc README.md LICENSE 75 | %else 76 | %doc README.md 77 | %license LICENSE 78 | %endif 79 | 80 | %dir %attr(0755, root, salt) %{fdir} 81 | %dir %attr(0755, root, salt) %{fdir}/states 82 | %dir %attr(0755, root, salt) %{fdir}/metadata 83 | 84 | %attr(0755, root, salt) %{fdir}/states/%{fname} 85 | %attr(0755, root, salt) %{fdir}/states/%{fname}/%{ftemplates} 86 | %attr(0755, root, salt) %{fdir}/metadata/%{fname} 87 | 88 | %changelog 89 | --------------------------------------------------------------------------------
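The spec file above installs the formula states under %{_datadir}/salt-formulas/states/hana and the form metadata under %{_datadir}/salt-formulas/metadata/hana. As a rough illustration of the standalone-salt usage mentioned in the %description, the sketch below wires the packaged states and a local pillar into a minion and applies them. It is only a sketch: the /srv paths, the minion config file name, the '*' targeting and the hana.sls pillar file name are illustrative assumptions, and the repository's example/ directory carries the project's own deployment example.

# /etc/salt/minion.d/saphanabootstrap.conf (assumed file name):
# expose the packaged states plus a local state/pillar tree to the minion
file_roots:
  base:
    - /srv/salt
    - /usr/share/salt-formulas/states
pillar_roots:
  base:
    - /srv/pillar

# /srv/salt/top.sls: assign the hana state (hana/init.sls from the formula) to the nodes
base:
  '*':
    - hana

# /srv/pillar/top.sls: assign the pillar data; /srv/pillar/hana.sls would hold content
# adapted from pillar.example to the concrete landscape
base:
  '*':
    - hana

# then, on each node, apply the configured states locally
salt-call --local state.apply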